Example #1
def test_graceful_interrupt(monkeypatch):
    """SMARTS should only throw a KeyboardInterript exception."""

    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )
    agent = agent_spec.build_agent()
    env = build_env(agent_spec)

    with pytest.raises(KeyboardInterrupt):
        obs = env.reset()

        # To simulate a user interrupting the sim (e.g. ctrl-c), we just need to
        # hook into some function that SMARTS calls internally (like this one).
        with mock.patch("smarts.core.sensors.Sensors.observe",
                        side_effect=KeyboardInterrupt):
            for episode in range(10):
                obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

        assert episode == 0, "SMARTS should have been interrupted, ending early"

    with pytest.raises(SMARTSNotSetupError):
        env.step({AGENT_ID: agent.act(obs)})
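This test depends on a build_env helper and an AGENT_ID constant that are not shown (Example #4 below reuses them). A minimal sketch of what they might look like, assuming SMARTS' smarts.env:hiway-v0 gym entry point and a placeholder scenario path; the real helper may differ:

import gym

AGENT_ID = "Agent-007"  # hypothetical id; the tests only need it to be consistent

def build_env(agent_spec):
    # Hypothetical helper: wire a single AgentSpec into SMARTS' gym environment.
    return gym.make(
        "smarts.env:hiway-v0",
        scenarios=["scenarios/loop"],  # placeholder scenario path
        agent_specs={AGENT_ID: agent_spec},
        headless=True,
    )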
Example #2
def test_building_agent_with_list_or_tuple_params():
    agent_spec = AgentSpec(
        policy_params=[32, 41],
        policy_builder=lambda x, y: AgentPolicy.from_function(lambda _: (x, y)),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == (32, 41)
Example #3
def test_building_agent_with_dict_params():
    agent_spec = AgentSpec(
        policy_params={"y": 2, "x": 1},
        policy_builder=lambda x, y: AgentPolicy.from_function(lambda _: x / y),
    )

    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == 1 / 2
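Taken together, Examples #2 and #3 pin down how build_agent forwards policy_params to policy_builder: a list or tuple is splatted as positional arguments, while a dict is splatted as keyword arguments. A rough sketch of that dispatch, written here as an assumption about build_agent's behaviour rather than the actual SMARTS source:

def call_policy_builder(policy_builder, policy_params):
    # Sketch of the dispatch the two tests above exercise.
    if policy_params is None:
        return policy_builder()
    if isinstance(policy_params, (list, tuple)):
        return policy_builder(*policy_params)  # positional: [32, 41] -> x=32, y=41
    return policy_builder(**policy_params)  # keyword: {"y": 2, "x": 1} -> x=1, y=2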
Example #4
def test_graceful_shutdown():
    """SMARTS should not throw any exceptions when shutdown."""
    agent_spec = AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )
    env = build_env(agent_spec)
    agent = agent_spec.build_agent()
    obs = env.reset()
    for _ in range(10):
        obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})

    env.close()
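Example #4 only exercises the happy path. When writing similar tests, a try/finally block guarantees env.close() runs even if a step raises; a small sketch reusing the hypothetical build_env helper sketched after Example #1:

def run_episode_safely(agent_spec, steps=10):
    env = build_env(agent_spec)  # hypothetical helper, see Example #1
    agent = agent_spec.build_agent()
    try:
        obs = env.reset()
        for _ in range(steps):
            obs, _, _, _ = env.step({AGENT_ID: agent.act(obs)})
    finally:
        env.close()  # always shut SMARTS down, even on failure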
Example #5
def agent_spec():
    return AgentSpec(
        interface=AgentInterface(
            drivable_area_grid_map=True,
            ogm=True,
            rgb=True,
            lidar=True,
            waypoints=True,
            max_episode_steps=MAX_STEPS_PER_EPISODE,
            debug=True,
            neighborhood_vehicles=True,
            action=ActionSpaceType.Lane,
        ),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )
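This fixture turns on every major sensor. As a hedged sketch of how the resulting observation might be inspected: the attribute names below (drivable_area_grid_map, occupancy_grid_map, top_down_rgb, lidar_point_cloud, neighborhood_vehicle_states) match older SMARTS observation objects and may differ between versions:

def check_all_sensors_populated(obs):
    # Each enabled sensor should populate its field on the observation.
    assert obs.drivable_area_grid_map is not None
    assert obs.occupancy_grid_map is not None  # ogm=True
    assert obs.top_down_rgb is not None  # rgb=True
    assert obs.lidar_point_cloud is not None  # lidar=True
    assert obs.waypoint_paths  # waypoints=True
    assert obs.neighborhood_vehicle_states is not None  # neighborhood_vehicles=True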
Example #6
def agent_spec():
    return AgentSpec(
        interface=AgentInterface(
            road_waypoints=RoadWaypoints(40),
            neighborhood_vehicles=NeighborhoodVehicles(
                radius=max(MAP_WIDTH * MAP_RESOLUTION, MAP_HEIGHT * MAP_RESOLUTION)
                * 0.5
            ),
            drivable_area_grid_map=DrivableAreaGridMap(
                width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION
            ),
            ogm=OGM(width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION),
            rgb=RGB(width=MAP_WIDTH, height=MAP_HEIGHT, resolution=MAP_RESOLUTION),
            action=ActionSpaceType.Lane,
        ),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )
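The neighborhood_vehicles radius above is half of the longer map side in world units (grid cells times metres per cell). With hypothetical values for the three constants, the arithmetic works out as follows:

MAP_WIDTH, MAP_HEIGHT, MAP_RESOLUTION = 256, 128, 0.5  # hypothetical values

# max(256 * 0.5, 128 * 0.5) * 0.5 = max(128.0, 64.0) * 0.5 = 64.0 metres
radius = max(MAP_WIDTH * MAP_RESOLUTION, MAP_HEIGHT * MAP_RESOLUTION) * 0.5
assert radius == 64.0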
Example #7
def agent_spec():
    def observation_adapter(env_observation):
        ego = env_observation.ego_vehicle_state
        waypoint_paths = env_observation.waypoint_paths
        wps = [path[0] for path in waypoint_paths]

        # distance of vehicle from center of lane
        closest_wp = min(wps, key=lambda wp: wp.dist_to(ego.position))
        signed_dist_from_center = closest_wp.signed_lateral_error(ego.position)
        lane_hwidth = closest_wp.lane_width * 0.5
        norm_dist_from_center = signed_dist_from_center / lane_hwidth

        return {OBSERVATION_EXPECTED: norm_dist_from_center}

    def reward_adapter(env_obs, env_reward):
        # Reward is currently the delta in distance travelled by this agent.
        # We want to make sure that this is in fact a delta and not the total
        # distance travelled, since this bug has appeared a few times.
        #
        # The way to verify this is to check that the reward does not grow without bound.
        assert -3 < env_reward < 3

        # Return a constant reward to test reward adapter call.
        return REWARD_EXPECTED

    def info_adapter(env_obs, env_reward, env_info):
        env_info[INFO_EXTRA_KEY] = "blah"
        return env_info

    def action_adapter(model_action):
        # We convert the action command to the required lower case.
        return model_action.lower()

    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=100),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: ACTION_TO_BE_ADAPTED),
        observation_adapter=observation_adapter,
        reward_adapter=reward_adapter,
        action_adapter=action_adapter,
        info_adapter=info_adapter,
    )
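For orientation, the four adapters configured above sit between the raw simulation and the learning code. A rough sketch of the order in which they are applied on each step, written as an assumption about the wrapper's behaviour rather than the actual SMARTS source:

def adapted_step(spec, agent, env_obs, env_reward, env_info):
    # Sketch of the adapter pipeline the fixture above configures.
    obs = spec.observation_adapter(env_obs)  # raw obs -> what the agent sees
    reward = spec.reward_adapter(env_obs, env_reward)  # asserts bounds, returns constant
    info = spec.info_adapter(env_obs, env_reward, env_info)  # adds INFO_EXTRA_KEY
    action = spec.action_adapter(agent.act(obs))  # model action -> lower-cased sim action
    return obs, reward, info, action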
Example #8
def agent_spec():
    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=100),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )
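The agent_spec functions in Examples #5 through #8 read like pytest fixtures whose @pytest.fixture decorators were stripped during extraction. Assuming that, a minimal test consuming the Example #8 spec might look like this (the test body is hypothetical):

import pytest

@pytest.fixture
def agent_spec():
    return AgentSpec(
        interface=AgentInterface.from_type(AgentType.Laner, max_episode_steps=100),
        policy_builder=lambda: AgentPolicy.from_function(lambda _: "keep_lane"),
    )

def test_act_returns_keep_lane(agent_spec):
    agent = agent_spec.build_agent()
    assert agent.act("dummy observation") == "keep_lane"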