Example #1
def test_graceful_interrupt(monkeypatch):
    """SMARTS should only throw a KeyboardInterript exception."""

    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
    agent = agent_spec.build_agent()
    env = build_env(agent_spec)

    with pytest.raises(KeyboardInterrupt):
        obs = env.reset()

        # To simulate a user interrupting the sim (e.g. ctrl-c), we just need to
        # hook into some function that SMARTS calls internally (like this one).
        with mock.patch(
            "smarts.core.sensors.Sensors.observe", side_effect=KeyboardInterrupt
        ):
            for episode in range(10):
                obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

        assert episode == 0, "SMARTS should have been interrupted, ending early"

    with pytest.raises(SMARTSNotSetupError):
        env.step({AGENT_ID: agent.act(obs)})
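
build_env is referenced in these tests but not shown. A minimal sketch of what
such a helper might look like, assuming SMARTS's registered "hiway-v0" gym
entry point; the AGENT_ID value and the scenario path are placeholders, not
taken from the test module:

import gym

AGENT_ID = "Agent-007"  # hypothetical id shared by the tests

def build_env(agent_spec):
    # Single-agent SMARTS environment; "scenarios/loop" is a placeholder path.
    return gym.make(
        "smarts.env:hiway-v0",
        scenarios=["scenarios/loop"],
        agent_specs={AGENT_ID: agent_spec},
        headless=True,
    )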
Example #2
def _make_agent_specs(num_agents):
    agent_specs = {
        "AGENT_" + str(agent_id): AgentSpec(
            interface=AgentInterface(
                rgb=RGB(),
                action=ActionSpaceType.Lane,
            ),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
            observation_adapter=lambda obs: obs.top_down_rgb.data,
            reward_adapter=lambda obs, reward: reward,
            info_adapter=lambda obs, reward, info: info["score"],
        )
        for agent_id in range(num_agents)
    }

    obs_space = gym.spaces.Dict({
        "AGENT_" + str(agent_id): gym.spaces.Box(
            low=0,
            high=255,
            shape=(
                agent_specs["AGENT_" + str(agent_id)].interface.rgb.width,
                agent_specs["AGENT_" + str(agent_id)].interface.rgb.height,
                3,
            ),
            dtype=np.uint8,
        )
        for agent_id in range(num_agents)
    })

    return agent_specs, obs_space
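
A sketch of how the returned pair might be consumed; only _make_agent_specs
itself is from the source, the rest is illustrative:

agent_specs, obs_space = _make_agent_specs(2)

# Build one agent per spec; the keys match the "AGENT_<n>" ids used above,
# and obs_space describes the per-agent RGB arrays the adapters emit.
agents = {agent_id: spec.build_agent() for agent_id, spec in agent_specs.items()}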
Example #3
def test_building_agent_with_dict_params():
    agent_spec = AgentSpec(
        agent_params={"y": 2, "x": 1},
        agent_builder=lambda x, y: Agent.from_function(lambda _: x / y),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == 1 / 2
Example #4
def test_building_agent_with_tuple_params():
    agent_spec = AgentSpec(
        agent_params=(32, 41),
        agent_builder=lambda x, y: Agent.from_function(lambda _: (x, y)),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == (32, 41)
Example #5
def test_graceful_shutdown():
    """SMARTS should not throw any exceptions when shutdown."""
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
    env = build_env(agent_spec)
    agent = agent_spec.build_agent()
    obs = env.reset()
    for _ in range(10):
        obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

    env.close()
Example #6
def agent_spec(max_steps_per_episode):
    return AgentSpec(
        interface=AgentInterface(
            drivable_area_grid_map=True,
            ogm=True,
            rgb=True,
            lidar=True,
            waypoints=True,
            max_episode_steps=max_steps_per_episode,
            debug=True,
            neighborhood_vehicles=True,
            action=ActionSpaceType.Lane,
        ),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
Example #7
def agent_specs():
    def observation_adapter(env_observation):
        return env_observation.top_down_rgb.data

    return {
        "AGENT_" + agent_id: AgentSpec(
            interface=AgentInterface(
                rgb=RGB(),
                action=ActionSpaceType.Lane,
            ),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
            observation_adapter=observation_adapter,
        )
        for agent_id in ["001", "002"]
    }
Example #8
def agent_specs():
    return {
        "Agent_" + agent_id: AgentSpec(
            interface=AgentInterface(
                rgb=RGB(width=256, height=256, resolution=50 / 256),
                action=ActionSpaceType.Lane,
                max_episode_steps=3,
            ),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
            observation_adapter=lambda obs: obs.top_down_rgb.data,
            reward_adapter=lambda obs, reward: reward,
            info_adapter=lambda obs, reward, info: info["score"],
        )
        for agent_id in ["1", "2"]
    }
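
A note on the RGB arguments above, assuming resolution is metres per pixel
(consistent with how it is combined with pixel sizes in Example #10): the
camera's world coverage is the pixel size times the resolution.

# 256 px * (50 / 256) m/px = a 50 m x 50 m top-down view.
width_px, resolution = 256, 50 / 256
assert width_px * resolution == 50.0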
Example #9
def _make_agent_specs(topdown_rgb):
    if topdown_rgb == "rgb":
        rgb = RGB()
    elif topdown_rgb == "false":
        rgb = False
    else:
        raise ValueError(f"Unexpected topdown_rgb value: {topdown_rgb!r}")

    return {
        "AGENT_" + agent_id: AgentSpec(
            interface=AgentInterface(
                rgb=rgb,
                action=ActionSpaceType.Lane,
            ),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
        )
        for agent_id in ["001", "002"]
    }
Example #10
def agent_spec():
    return AgentSpec(
        interface=AgentInterface(
            road_waypoints=RoadWaypoints(40),
            neighborhood_vehicles=NeighborhoodVehicles(
                radius=max(MAP_WIDTH * MAP_RESOLUTION, MAP_HEIGHT * MAP_RESOLUTION)
                * 0.5
            ),
            drivable_area_grid_map=DrivableAreaGridMap(
                width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION
            ),
            ogm=OGM(width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION),
            rgb=RGB(width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION),
            action=ActionSpaceType.Lane,
        ),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )
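
The NeighborhoodVehicles radius above is half of the larger map dimension
expressed in metres (pixels times metres per pixel). With hypothetical
constants, chosen only for illustration:

MAP_WIDTH = MAP_HEIGHT = 256  # pixels
MAP_RESOLUTION = 50 / 256     # metres per pixel

# Half of the larger side of the mapped area, in metres.
radius = max(MAP_WIDTH * MAP_RESOLUTION, MAP_HEIGHT * MAP_RESOLUTION) * 0.5
assert radius == 25.0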
Example #11
def agent_spec():
    def observation_adapter(env_observation):
        ego = env_observation.ego_vehicle_state
        waypoint_paths = env_observation.waypoint_paths
        wps = [path[0] for path in waypoint_paths]

        # Distance of the vehicle from the center of the lane.
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))
        signed_dist_from_center = closest_wp.signed_lateral_error(ego.position)
        lane_hwidth = closest_wp.lane_width * 0.5
        norm_dist_from_center = signed_dist_from_center / lane_hwidth

        return {OBSERVATION_EXPECTED: norm_dist_from_center}

    def reward_adapter(env_obs, env_reward):
        # The reward is currently the delta in distance travelled by this agent.
        # We want to make sure that this is in fact a delta and not the total
        # distance travelled, since this bug has appeared a few times.
        #
        # The way to verify this is to check that the reward does not grow
        # without bound.
        assert -3 < env_reward < 3

        # Return a constant reward to test reward adapter call.
        return REWARD_EXPECTED

    def info_adapter(env_obs, env_reward, env_info):
        env_info[INFO_EXTRA_KEY] = "blah"
        return env_info

    def action_adapter(model_action):
        # Convert the action command to the lower case that the environment expects.
        return model_action.lower()

    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=100),
        agent_builder=lambda: Agent.from_function(lambda _: ACTION_TO_BE_ADAPTED),
        observation_adapter=observation_adapter,
        reward_adapter=reward_adapter,
        action_adapter=action_adapter,
        info_adapter=info_adapter,
    )
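
The four adapters each wrap one leg of the env/agent loop. A sketch of the
order in which an env wrapper would typically apply them (illustrative, not
the library source; adapt_step is a hypothetical helper):

def adapt_step(agent, spec, raw_obs, raw_reward, raw_info):
    obs = spec.observation_adapter(raw_obs)                  # core obs -> agent obs
    action = spec.action_adapter(agent.act(obs))             # agent action -> sim action
    reward = spec.reward_adapter(raw_obs, raw_reward)        # shaped reward
    info = spec.info_adapter(raw_obs, raw_reward, raw_info)  # extra diagnostics
    return action, obs, reward, info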
Example #12
def _make_agent_specs(intrfcs):
    base_intrfc = AgentInterface(
        action=ActionSpaceType.Lane,
        accelerometer=False,
        drivable_area_grid_map=False,
        lidar=False,
        neighborhood_vehicles=False,
        ogm=False,
        rgb=False,
        waypoints=False,
    )

    return {
        "AGENT_"
        + agent_id: AgentSpec(
            interface=dataclasses.replace(base_intrfc, **intrfc),
            agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
        )
        for agent_id, intrfc in zip(["001", "002"], intrfcs)
    }
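
_make_agent_specs expects intrfcs to be an iterable of per-agent override
dicts whose keys are AgentInterface field names. A hypothetical call (the
override values are chosen only for illustration):

specs = _make_agent_specs([{"rgb": RGB()}, {"lidar": True}])
# -> {"AGENT_001": <spec with rgb enabled>, "AGENT_002": <spec with lidar enabled>}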
Example #13
def run_experiment(agent: Agent, environment: UltraEnv, max_steps=30) -> Tuple:
    action_sequence = []
    infos_sequence = []
    observations_sequence = []
    rewards_sequence = []

    dones = {"__all__": False}

    observations = environment.reset()
    observations_sequence.append(observations)

    while not dones["__all__"] and len(action_sequence) < max_steps:
        action = agent.act(observations[AGENT_ID])
        observations, rewards, dones, infos = environment.step({AGENT_ID: action})

        action_sequence.append(action)
        infos_sequence.append(infos)
        observations_sequence.append(observations)
        rewards_sequence.append(rewards)

    environment.close()

    return action_sequence, infos_sequence, observations_sequence, rewards_sequence
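
A hypothetical invocation; the agent and environment construction are
assumptions, only run_experiment itself is from the source:

actions, infos, observations, rewards = run_experiment(agent, env, max_steps=30)
assert len(actions) <= 30
assert len(observations) == len(actions) + 1  # reset() contributes the first one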
Example #14
def agent_spec():
    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=100),
        agent_builder=lambda: Agent.from_function(lambda _: "keep_lane"),
    )