Example #1
 def get_agent(self, ego, policy, max_episode_steps):
     observation_adapter = None
     if ego:
         config = get_agent_config_by_type(policy)
         agent_spec = AgentSpec(
             interface=config["interface"],
             agent_params=dict(config["policy"], checkpoint_dir=ego_model),
             agent_builder=config["policy_class"],
         )
         agent_spec.interface.max_episode_steps = max_episode_steps
         observation_adapter = IntersectionAdapter(
             agent_id="AGENT_007",
             social_vehicle_config=config["social_vehicle_config"],
             timestep_sec=config["env"]["timestep_sec"],
             **config["other"],
         )
     else:
         # Lane Following agent
         agent_spec = AgentSpec(
             interface=AgentInterface(
                 max_episode_steps=max_episode_steps,  # 10 mins
                 waypoints=True,
                 action=ActionSpaceType.Lane,
                 debug=False,
                 neighborhood_vehicles=NeighborhoodVehicles(radius=2000),
             ),
             agent_builder=DefaultPolicy,
         )
     return agent_spec, observation_adapter
Example #2
def main(scenarios, sim_name, headless, num_episodes, seed, max_episode_steps=None):
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(
            AgentType.LanerWithSpeed, max_episode_steps=max_episode_steps
        ),
        agent_builder=ChaseViaPointsAgent,
    )

    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=scenarios,
        agent_specs={AGENT_ID: agent_spec},
        sim_name=sim_name,
        headless=headless,
        visdom=False,
        timestep_sec=0.1,
        sumo_headless=True,
        seed=seed,
        # zoo_addrs=[("10.193.241.236", 7432)], # Sample server address (ip, port), to distribute social agents in remote server.
        # envision_record_data_replay_path="./data_replay",
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step({AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()
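
Example #2 builds its ego agent from a ChaseViaPointsAgent class that is not shown in this listing. A minimal sketch of a compatible agent, assuming the smarts.core.agent.Agent base class and the (target_speed, lane_change) action pair expected by AgentType.LanerWithSpeed, could look like this:

from smarts.core.agent import Agent


class ChaseViaPointsAgent(Agent):
    def act(self, obs):
        # AgentType.LanerWithSpeed expects a (target_speed, lane_change) pair.
        # As a placeholder, follow the current lane at its speed limit.
        return (obs.waypoint_paths[0][0].speed_limit, 0)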
Example #3
def main(scenarios, headless, seed):
    scenarios_iterator = Scenario.scenario_variations(scenarios, [])
    for _ in scenarios:
        scenario = next(scenarios_iterator)
        agent_missions = scenario.discover_missions_of_traffic_histories()

        for agent_id, mission in agent_missions.items():
            scenario.set_ego_missions({agent_id: mission})

            agent_spec = AgentSpec(
                interface=AgentInterface.from_type(AgentType.Laner,
                                                   max_episode_steps=None),
                agent_builder=KeepLaneAgent,
            )

            agent = agent_spec.build_agent()

            smarts = SMARTS(
                agent_interfaces={agent_id: agent_spec.interface},
                traffic_sim=SumoTrafficSimulation(headless=True,
                                                  auto_start=True),
                envision=Envision(),
            )
            observations = smarts.reset(scenario)

            dones = {agent_id: False}
            while not dones[agent_id]:
                agent_obs = observations[agent_id]
                agent_action = agent.act(agent_obs)

                observations, rewards, dones, infos = smarts.step(
                    {agent_id: agent_action})
Example #4
def main(scenarios, headless, num_episodes, seed):
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=None),
        agent_builder=KeepLaneAgent,
    )

    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=scenarios,
        agent_specs={AGENT_ID: agent_spec},
        headless=headless,
        visdom=False,
        timestep_sec=0.1,
        sumo_headless=True,
        seed=seed,
        # envision_record_data_replay_path="./data_replay",
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step({AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()
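
The KeepLaneAgent used in Example #4 (and in several later examples) is also defined elsewhere. A minimal sketch, assuming the smarts.core.agent.Agent base class and the string commands accepted by ActionSpaceType.Lane:

from smarts.core.agent import Agent


class KeepLaneAgent(Agent):
    def act(self, obs):
        # ActionSpaceType.Lane accepts one of:
        # "keep_lane", "slow_down", "change_lane_left", "change_lane_right".
        return "keep_lane"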
Example #5
def main(
    scenarios,
    headless,
    num_episodes,
    seed,
):
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(
            AgentType.StandardWithAbsoluteSteering, max_episode_steps=3000),
        policy_builder=HumanKeyboardPolicy,
    )

    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=scenarios,
        agent_specs={AGENT_ID: agent_spec},
        headless=headless,
        timestep_sec=0.1,
        seed=seed,
    )

    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step(
                {AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)

    env.close()
Example #6
def test_graceful_interrupt(monkeypatch):
    """SMARTS should only throw a KeyboardInterript exception."""

    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
    agent = agent_spec.build_agent()
    env = build_env(agent_spec)

    with pytest.raises(KeyboardInterrupt):
        obs = env.reset()

        # To simulate a user interrupting the sim (e.g. ctrl-c), we just need to
        # hook into some function that SMARTS calls internally (like this one).
        with mock.patch(
            "smarts.core.sensors.Sensors.observe", side_effect=KeyboardInterrupt
        ):
            for episode in range(10):
                obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

        assert episode == 0, "SMARTS should have been interrupted, ending early"

    with pytest.raises(SMARTSNotSetupError):
        env.step({AGENT_ID: agent.act(obs)})
Example #7
def test_building_agent_with_dict_params():
    agent_spec = AgentSpec(
        agent_params={"y": 2, "x": 1},
        agent_builder=lambda x, y: Agent.from_function(lambda _: x / y),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == 1 / 2
Example #8
def test_building_agent_with_tuple_params():
    agent_spec = AgentSpec(
        agent_params=(32, 41),
        agent_builder=lambda x, y: Agent.from_function(lambda _: (x, y)),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == (32, 41)
Example #9
def test_building_agent_with_list_or_tuple_params():
    agent_spec = AgentSpec(
        policy_params=[32, 41],
        policy_builder=lambda x, y: AgentPolicy.from_function(lambda _:
                                                              (x, y)),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == (32, 41)
Example #10
def train(training_scenarios, evaluation_scenarios, sim_name, headless,
          num_episodes, seed):
    agent_params = {"input_dims": 4, "hidden_dims": 7, "output_dims": 3}
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Standard,
                                           max_episode_steps=5000),
        agent_params=agent_params,
        agent_builder=PyTorchAgent,
        observation_adapter=observation_adapter,
    )

    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=training_scenarios,
        agent_specs={AGENT_ID: agent_spec},
        sim_name=sim_name,
        headless=headless,
        fixed_timestep_sec=0.1,
        seed=seed,
    )

    steps = 0
    for episode in episodes(n=num_episodes):
        agent = agent_spec.build_agent()
        observations = env.reset()
        episode.record_scenario(env.scenario_log)

        dones = {"__all__": False}
        while not dones["__all__"]:
            agent_obs = observations[AGENT_ID]
            agent_action = agent.act(agent_obs)
            observations, rewards, dones, infos = env.step(
                {AGENT_ID: agent_action})
            episode.record_step(observations, rewards, dones, infos)
            steps += 1

            if steps % 500 == 0:
                print("Evaluating agent")

                # We construct an evaluation agent based on the saved
                # state of the agent in training.
                model_path = tempfile.mktemp()
                agent.save(model_path)

                eval_agent_spec = agent_spec.replace(
                    agent_params=dict(agent_params, model_path=model_path))

                # Remove the call to ray.wait if you want evaluation to run
                # in parallel with training
                ray.wait([
                    evaluate.remote(eval_agent_spec, evaluation_scenarios,
                                    headless, seed)
                ])

    env.close()
Example #11
def test_graceful_shutdown():
    """SMARTS should not throw any exceptions when shutdown."""
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
    env = build_env(agent_spec)
    agent = agent_spec.build_agent()
    obs = env.reset()
    for _ in range(10):
        obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

    env.close()
Example #12
def entrypoint(
    goal_is_nearby_threshold=40,
    lane_end_threshold=51,
    lane_crash_distance_threshold=6,
    lane_crash_ttc_threshold=2,
    intersection_crash_distance_threshold=6,
    intersection_crash_ttc_threshold=5,
    target_speed=15,
    lane_change_speed=12.5,
):
    with pkg_resources.path(rl_agent, "checkpoint") as checkpoint_path:
        return AgentSpec(
            interface=agent_interface,
            observation_adapter=get_observation_adapter(
                goal_is_nearby_threshold=goal_is_nearby_threshold,
                lane_end_threshold=lane_end_threshold,
                lane_crash_distance_threshold=lane_crash_distance_threshold,
                lane_crash_ttc_threshold=lane_crash_ttc_threshold,
                intersection_crash_distance_threshold=intersection_crash_distance_threshold,
                intersection_crash_ttc_threshold=intersection_crash_ttc_threshold,
            ),
            action_adapter=get_action_adapter(
                target_speed=target_speed,
                lane_change_speed=lane_change_speed,
            ),
            agent_builder=lambda: RLAgent(
                load_path=str((checkpoint_path / "checkpoint").absolute()),
                policy_name="default_policy",
                observation_space=OBSERVATION_SPACE,
                action_space=ACTION_SPACE,
            ),
        )
Example #13
def _make_agent_specs(num_agent):
    agent_specs = {
        "AGENT_" + str(agent_id): AgentSpec(
            interface=AgentInterface(
                rgb=RGB(),
                action=ActionSpaceType.Lane,
            ),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
            observation_adapter=lambda obs: obs.top_down_rgb.data,
            reward_adapter=lambda obs, reward: reward,
            info_adapter=lambda obs, reward, info: info["score"],
        )
        for agent_id in range(num_agent)
    }

    obs_space = gym.spaces.Dict({
        "AGENT_" + str(agent_id): gym.spaces.Box(
            low=0,
            high=255,
            shape=(
                agent_specs["AGENT_" + str(agent_id)].interface.rgb.width,
                agent_specs["AGENT_" + str(agent_id)].interface.rgb.height,
                3,
            ),
            dtype=np.uint8,
        )
        for agent_id in range(num_agent)
    })

    return agent_specs, obs_space
Example #14
def main(scenarios, headless, seed):
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner,
                                           max_episode_steps=None),
        agent_builder=None,
        observation_adapter=None,
    )

    smarts = SMARTS(
        agent_interfaces={},
        traffic_sim=SumoTrafficSimulation(headless=True, auto_start=True),
        envision=Envision(),
    )
    scenarios_iterator = Scenario.scenario_variations(
        scenarios,
        list([]),
    )

    smarts.reset(next(scenarios_iterator))

    for _ in range(5000):
        smarts.step({})
        smarts.attach_sensors_to_vehicles(
            agent_spec, smarts.vehicle_index.social_vehicle_ids())
        obs, _, _, _ = smarts.observe_from(
            smarts.vehicle_index.social_vehicle_ids())
Example #15
def agent_spec():
    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.TrajectoryInterpolator,
                                           neighborhood_vehicles=True),
        agent_builder=WithTimeTrajectoryAgent,
        agent_params=None,
    )
Example #16
def replay_entrypoint(
    save_directory,
    id,
    wrapped_agent_locator,
    wrapped_agent_params=None,
    read=False,
):
    if wrapped_agent_params is None:
        wrapped_agent_params = {}
    from .replay_agent import ReplayAgent

    internal_spec = make(wrapped_agent_locator, **wrapped_agent_params)
    global social_index
    global replay_save_dir
    global replay_read
    spec = AgentSpec(
        interface=internal_spec.interface,
        agent_params={
            "save_directory": replay_save_dir,
            "id": f"{id}_{social_index}",
            "internal_spec": internal_spec,
            "wrapped_agent_params": wrapped_agent_params,
            "read": replay_read,
        },
        agent_builder=ReplayAgent,
    )
    social_index += 1
    return spec
Example #17
def agent_spec(agent_and_agent_type):
    return AgentSpec(
        interface=AgentInterface.from_type(
            agent_and_agent_type[1], max_episode_steps=5000
        ),
        agent_builder=agent_and_agent_type[0],
    )
Example #18
    def __init__(self, save_directory, id, read: bool,
                 internal_spec: AgentSpec):
        import smarts.core

        if smarts.core.current_seed() is None:
            smarts.core.seed(42)

        self.save_directory = save_directory
        self._base_agent = internal_spec.build_agent()
        self._logger = logging.getLogger(self.__class__.__name__)
        global agent_index
        self.id = f"{id}_{agent_index}"
        agent_index += 1

        abs_path = os.path.abspath(save_directory)
        self._read = read
        file_mode = "wb" if not read else "rb"
        path = Path(f"{abs_path}/{self.id}")
        os.makedirs(abs_path, exist_ok=True)
        try:
            self._file = path.open(mode=file_mode)
        except FileNotFoundError as e:
            assert self._read
            self._logger.error(
                f"The file which you are trying to be read does not exist. "
                f"Make sure the {save_directory} directory passed is correct and has the agent file which is being read"
            )
            raise e
Example #19
    def __init__(self, **kwargs):
        print(kwargs)
        self.episode_limit = kwargs['episode_limit']
        self.n_agents = kwargs['agent_num']
        self.observation_space = [
            gym.spaces.Box(low=-1e10, high=1e10, shape=(10, ))
        ] * self.n_agents
        self.action_space = [gym.spaces.Discrete(4)] * self.n_agents
        self.agent_ids = ["Agent %i" % i for i in range(self.n_agents)]
        self.n_actions = 4
        self.scenarios = [kwargs['scenarios']]

        self.headless = kwargs['headless']
        num_episodes = 100
        self.seed = kwargs['seed']

        self.agent_specs = {
            agent_id: AgentSpec(
                interface=AgentInterface.from_type(AgentType.Laner,
                                                   max_episode_steps=5000),
                observation_adapter=observation_adapter,
                reward_adapter=reward_adapter,
                action_adapter=action_adapter,
            )
            for agent_id in self.agent_ids
        }

        self.base_env = gym.make(
            "smarts.env:hiway-v0",
            scenarios=self.scenarios,
            agent_specs=self.agent_specs,
            headless=self.headless,
            seed=self.seed,
        )
        self.current_observations = self.base_env.reset()
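
Example #19 relies on observation_adapter, reward_adapter, and action_adapter defined elsewhere in its module. As an illustration only, a Discrete(4) policy output could be mapped onto the four lane commands roughly as follows (the names and the exact mapping are assumptions, not taken from the source):

LANE_ACTIONS = ["keep_lane", "slow_down", "change_lane_left", "change_lane_right"]


def action_adapter(model_action):
    # Map a Discrete(4) index produced by the policy to a SMARTS lane command.
    return LANE_ACTIONS[int(model_action)]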
Example #20
def entrypoint(
    gains={
        "theta": 3.0,
        "position": 4.0,
        "obstacle": 3.0,
        "u_accel": 0.1,
        "u_yaw_rate": 1.0,
        "terminal": 0.01,
        "impatience": 0.01,
        "speed": 0.01,
    },
    debug=False,
    max_episode_steps=600,
):
    from .policy import Policy

    return AgentSpec(
        interface=AgentInterface(
            action=ActionSpaceType.Trajectory,
            waypoints=True,
            neighborhood_vehicles=True,
            max_episode_steps=max_episode_steps,
        ),
        policy_params={"gains": gains, "debug": debug,},
        policy_builder=Policy,
        perform_self_test=False,
    )
Example #21
def entrypoint(
    gains={
        "theta": 3.0,
        "position": 4.0,
        "obstacle": 3.0,
        "u_accel": 0.1,
        "u_yaw_rate": 1.0,
        "terminal": 0.01,
        "impatience": 0.01,
        "speed": 0.01,
        "rate": 1,
    },
    debug=False,
    aggressiveness=0,
    max_episode_steps=None,
):
    from .agent import OpEnAgent

    return AgentSpec(
        interface=AgentInterface(
            action=ActionSpaceType.Trajectory,
            waypoints=True,
            neighborhood_vehicles=True,
            max_episode_steps=max_episode_steps,
            agent_behavior=AgentBehavior(aggressiveness=aggressiveness),
        ),
        agent_params={
            "gains": gains,
            "debug": debug,
        },
        agent_builder=OpEnAgent,
    )
Example #22
def env_and_spec(action,
                 agent_type,
                 max_episode_steps,
                 scenarios,
                 seed=42,
                 agent_id="Agent-006"):
    class Policy(AgentPolicy):
        def act(self, obs):
            return action

    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(
            agent_type, max_episode_steps=max_episode_steps),
        policy_builder=Policy,
    )
    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=scenarios,
        agent_specs={agent_id: agent_spec},
        headless=True,
        visdom=False,
        timestep_sec=TIMESTEP_SEC,
        sumo_headless=True,
        seed=seed,
    )

    return (env, agent_spec)
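
A hypothetical call to the env_and_spec helper above (the scenario path is an assumption; TIMESTEP_SEC is expected to be defined in the helper's module):

env, agent_spec = env_and_spec(
    action="keep_lane",
    agent_type=AgentType.Laner,
    max_episode_steps=100,
    scenarios=["scenarios/loop"],
)
observations = env.reset()
env.close()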
Example #23
def main(scenario):
    scenario_path = Path(scenario).absolute()
    agent_mission_count = Scenario.discover_agent_missions_count(scenario_path)

    assert agent_mission_count > 0, "agent mission count should be larger than 0"

    agent_ids = [f"AGENT-{i}" for i in range(agent_mission_count)]

    agent_specs = {
        agent_id: AgentSpec(
            interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=None),
            agent_builder=RuleBasedAgent,
        )
        for agent_id in agent_ids
    }

    agents = {aid: agent_spec.build_agent() for aid, agent_spec in agent_specs.items()}

    env = HiWayEnv(scenarios=[scenario_path], agent_specs=agent_specs)

    while True:
        observations = env.reset()
        done = False
        while not done:
            agent_ids = list(observations.keys())
            actions = {aid: agents[aid].act(observations[aid]) for aid in agent_ids}
            observations, _, dones, _ = env.step(actions)
            done = dones["__all__"]
Example #24
        def init_env():
            if True:
                agent_spec = AgentSpec(
                    interface=AgentInterface.from_type(
                        AgentType.Laner, max_episode_steps=all_args.episode_length
                    )
                )
                AGENT_ID = [str(i) for i in range(all_args.num_agents)]

                env = gym.make(
                    "smarts.env:hiway-v0",
                    scenarios=all_args.scenarios,
                    agent_specs={i: agent_spec for i in AGENT_ID},
                    headless=all_args.headless,
                    visdom=False,
                    timestep_sec=0.1,
                    sumo_headless=True,
                    seed=all_args.seed + rank * 1000,
                    # zoo_workers=[("143.110.210.157", 7432)], # Distribute social agents across these workers
                    auth_key=all_args.auth_key,
                    # envision_record_data_replay_path="./data_replay",
                )
                env = SmartWrapper(env, all_args.num_agents)
            else:
                print("Can not support the " +
                      all_args.env_name + "environment.")
                raise NotImplementedError
            # env.seed(all_args.seed + rank * 1000)
            return env
Example #25
def main(_args):
    scenario_path = Path(_args.scenario).absolute()
    mission_num = get_submission_num(scenario_path)

    if mission_num == -1:
        mission_num = 1

    AGENT_IDS = [f"AGENT-{i}" for i in range(mission_num)]

    agent_interface = AgentInterface.from_type(AgentType.Laner)

    agent_specs = [
        AgentSpec(interface=agent_interface,
                  policy_builder=lambda: KeeplanePolicy())
        for _ in range(mission_num)
    ]

    agents = dict(zip(AGENT_IDS, agent_specs))

    env = gym.make(
        "smarts.env:hiway-v0",
        scenarios=[scenario_path],
        agent_specs=agents,
        headless=_args.headless,
        visdom=False,
        seed=42,
    )

    agents = {
        _id: agent_spec.build_agent()
        for _id, agent_spec in agents.items()
    }

    import webbrowser
    webbrowser.open('http://localhost:8081/')

    for ie in range(30):
        step = 0
        print(f"\n---- Starting episode: {ie}...")
        observations = env.reset()
        total_reward = 0.0
        dones = {"__all__": False}

        while not dones["__all__"]:
            step += 1
            agent_actions = {
                _id: agents[_id].act(obs)
                for _id, obs in observations.items()
            }
            observations, rewards, dones, _ = env.step(agent_actions)
            total_reward += sum(rewards.values())

            if (step + 1) % 10 == 0:
                print(
                    f"* Episode: {ie} * step: {step} * acc-Reward: {total_reward}"
                )
        print("Accumulated reward:", total_reward)

    env.close()
Example #26
def run_experiment(log_path, experiment_name, training_iteration=100):
    model_path = Path(__file__).parent / "model"
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Standard,
                                           max_episode_steps=5000),
        policy=RLlibTFSavedModelAgent(
            model_path.absolute(),
            OBSERVATION_SPACE,
        ),
        observation_adapter=observation_adapter,
        reward_adapter=reward_adapter,
        action_adapter=action_adapter,
    )

    rllib_policies = {
        "policy": (
            None,
            OBSERVATION_SPACE,
            ACTION_SPACE,
            {
                "model": {
                    "custom_model": TrainingModel.NAME
                }
            },
        )
    }

    scenario_path = Path(__file__).parent / "../../scenarios/loop"
    scenario_path = str(scenario_path.absolute())

    tune_config = {
        "env": RLlibHiWayEnv,
        "env_config": {
            "scenarios": [scenario_path],
            "seed": 42,
            "headless": True,
            "agent_specs": {
                "Agent-007": agent_spec
            },
        },
        "multiagent": {
            "policies": rllib_policies,
            "policy_mapping_fn": lambda _: "policy",
        },
        "log_level": "WARN",
        "num_workers": multiprocessing.cpu_count() - 1,
        "horizon": HORIZON,
    }

    analysis = tune.run(
        "PPO",
        name=experiment_name,
        stop={"training_iteration": training_iteration},
        max_failures=10,
        local_dir=log_path,
        config=tune_config,
    )

    return analysis
Example #27
def rllib_agent():
    def observation_adapter(env_observation):
        ego = env_observation.ego_vehicle_state
        waypoint_paths = env_observation.waypoint_paths
        wps = [path[0] for path in waypoint_paths]

        # distance of vehicle from center of lane
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))
        signed_dist_from_center = closest_wp.signed_lateral_error(ego.position)
        lane_hwidth = closest_wp.lane_width * 0.5
        norm_dist_from_center = signed_dist_from_center / lane_hwidth

        return {
            "distance_from_center": np.array([norm_dist_from_center]),
            "angle_error": np.array([closest_wp.relative_heading(ego.heading)]),
            "speed": np.array([ego.speed]),
            "steering": np.array([ego.steering]),
        }

    def reward_adapter(env_obs, env_reward):
        return env_reward

    def action_adapter(model_action):
        throttle, brake, steering = model_action
        return np.array([throttle, brake, steering])

    def info_adapter(env_obs, env_reward, env_info):
        env_info[INFO_EXTRA_KEY] = "blah"
        return env_info

    # This action space should match the input to the action_adapter(..) function above.
    ACTION_SPACE = gym.spaces.Box(
        low=np.array([0.0, 0.0, -1.0]), high=np.array([1.0, 1.0, 1.0]), dtype=np.float32
    )

    # This observation space should match the output of observation_adapter(..) above.
    OBSERVATION_SPACE = gym.spaces.Dict(
        {
            "distance_from_center": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
            "angle_error": gym.spaces.Box(low=-np.pi, high=np.pi, shape=(1,)),
            "speed": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
            "steering": gym.spaces.Box(low=-1e10, high=1e10, shape=(1,)),
        }
    )

    return {
        "agent_spec": AgentSpec(
            interface=AgentInterface.from_type(
                AgentType.Standard, max_episode_steps=500
            ),
            observation_adapter=observation_adapter,
            reward_adapter=reward_adapter,
            action_adapter=action_adapter,
            info_adapter=info_adapter,
        ),
        "observation_space": OBSERVATION_SPACE,
        "action_space": ACTION_SPACE,
    }
Example #28
def main(scenarios, headless, seed):
    scenarios_iterator = Scenario.scenario_variations(scenarios, [])
    smarts = SMARTS(
        agent_interfaces={},
        traffic_sim=None,
        envision=None if headless else Envision(),
    )
    for _ in scenarios:
        scenario = next(scenarios_iterator)
        agent_missions = scenario.discover_missions_of_traffic_histories()

        for agent_id, mission in agent_missions.items():
            agent_spec = AgentSpec(
                interface=AgentInterface.from_type(AgentType.LanerWithSpeed,
                                                   max_episode_steps=None),
                agent_builder=KeepLaneAgent,
                agent_params=scenario.traffic_history_target_speed,
            )
            agent = agent_spec.build_agent()

            # Take control of vehicle with corresponding agent_id
            smarts.switch_ego_agent({agent_id: agent_spec.interface})

            # tell the traffic history provider to start traffic
            # at the point when this agent enters...
            traffic_history_provider = smarts.get_provider_by_type(
                TrafficHistoryProvider)
            assert traffic_history_provider
            traffic_history_provider.start_time = mission.start_time

            # agent vehicle will enter right away...
            modified_mission = replace(mission, start_time=0.0)
            scenario.set_ego_missions({agent_id: modified_mission})

            observations = smarts.reset(scenario)

            dones = {agent_id: False}
            while not dones.get(agent_id, True):
                agent_obs = observations[agent_id]
                agent_action = agent.act(agent_obs)

                observations, rewards, dones, infos = smarts.step(
                    {agent_id: agent_action})

    smarts.destroy()
Example #29
 def __new__(
     self,
     policy_class,
     action_type,
     checkpoint_dir=None,
     task=None,
     max_episode_steps=1200,
     experiment_dir=None,
 ):
     if experiment_dir:
         print(f"LOADING SPEC from {experiment_dir}/spec.pkl")
         with open(f"{experiment_dir}/spec.pkl", "rb") as input:
             spec = dill.load(input)
             new_spec = AgentSpec(
                 interface=spec.interface,
                 agent_params=dict(
                     policy_params=spec.agent_params["policy_params"],
                     checkpoint_dir=checkpoint_dir,
                 ),
                 agent_builder=spec.policy_builder,
                 observation_adapter=spec.observation_adapter,
                 reward_adapter=spec.reward_adapter,
             )
             spec = new_spec
     else:
         adapter = BaselineAdapter()
         policy_dir = "/".join(
             inspect.getfile(policy_class).split("/")[:-1])
         policy_params = load_yaml(f"{policy_dir}/params.yaml")
         spec = AgentSpec(
             interface=AgentInterface(
                 waypoints=Waypoints(lookahead=20),
                 neighborhood_vehicles=NeighborhoodVehicles(200),
                 action=action_type,
                 rgb=False,
                 max_episode_steps=max_episode_steps,
                 debug=True,
             ),
             agent_params=dict(policy_params=policy_params,
                               checkpoint_dir=checkpoint_dir),
             agent_builder=policy_class,
             observation_adapter=adapter.observation_adapter,
             reward_adapter=adapter.reward_adapter,
         )
     return spec
Example #30
        def on_trigger(ctx: Dict[str, Any]):
            # Define agent specs to be assigned
            agent_spec = AgentSpec(
                interface=AgentInterface(waypoints=True, action=ActionSpaceType.Lane),
                agent_builder=BasicAgent,
            )

            # Select a random sample from candidates
            k = ctx.get("vehicles_to_replace_randomly", 0)
            if k <= 0:
                logger.warning(
                    "default (0) or negative value specified for replacement. Replacing all valid vehicle candidates."
                )
                sample = ctx["vehicle_candidates"]
            else:
                logger.info(
                    f"Choosing {k} vehicles randomly from {len(ctx['vehicle_candidates'])} valid vehicle candidates."
                )
                sample = random.sample(ctx["vehicle_candidates"], k)
            assert len(sample) != 0

            for veh_id in sample:
                # Map selected vehicles to agent ids & specs
                agent_id = f"agent-{veh_id}"
                ctx["agents"][agent_id] = agent_spec.build_agent()

                # Create missions based on current state and traffic history
                positional, traverse = scenario.create_dynamic_traffic_history_mission(
                    veh_id, ctx["elapsed_sim_time"], ctx["positional_radius"]
                )

                # Take control of vehicles immediately
                try:
                    # Try to assign a PositionalGoal at the last recorded timestep
                    smarts.add_agent_and_switch_control(
                        veh_id, agent_id, agent_spec.interface, positional
                    )
                except PlanningError:
                    logger.warning(
                        f"Unable to create PositionalGoal for vehicle {veh_id}, falling back to TraverseGoal"
                    )
                    smarts.add_agent_and_switch_control(
                        veh_id, agent_id, agent_spec.interface, traverse
                    )