Example #1
 def make_state_from_dict(self, states, device):
     # Batch a list of per-step state dicts into device tensors.
     image_keys = states[0]["images"].keys()
     images = {}
     for k in image_keys:
         # Each per-step image tensor already carries a leading batch
         # dimension, so concatenating along dim=0 stacks the batch.
         _images = torch.cat([e[k] for e in states],
                             dim=0).float().to(device)
         _images = normalize_im(_images)
         images[k] = _images
     # Low-dim state vectors are batched the same way.
     low_dim_states = torch.cat([e["low_dim_states"] for e in states],
                                dim=0).float().to(device)
     if "social_vehicles" in states[0]:
         social_vehicles = [
             e["social_vehicles"][0].float().to(device) for e in states
         ]
     else:
         social_vehicles = False
     out = {
         "images": images,
         "low_dim_states": low_dim_states,
         "social_vehicles": social_vehicles,
     }
     return out
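Both examples assume torch and the project-local normalize_im helper are in scope. Example #1 also assumes every per-step tensor already carries a leading batch dimension. A minimal, self-contained sketch of that input contract (the camera key top_down_rgb and all shapes are illustrative assumptions, not part of the original API):

import torch

# Two per-step states; each tensor already has a batch dim of 1 (assumed shapes).
states = [
    {
        "images": {"top_down_rgb": None},          # declares the camera keys
        "top_down_rgb": torch.rand(1, 3, 64, 64),  # image lives at the top level
        "low_dim_states": torch.rand(1, 10),
    }
    for _ in range(2)
]

image_keys = states[0]["images"].keys()
batched = {k: torch.cat([e[k] for e in states], dim=0) for k in image_keys}
assert batched["top_down_rgb"].shape == (2, 3, 64, 64)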
Example #2
 def make_state_from_dict(self, states, device):
     # TODO: temporary function copied from replay_buffer.py. A better
     #  approach would be to make PPO use the replay_buffer interface,
     #  but that is not critical for now.
     image_keys = states[0]["images"].keys()
     images = {}
     for k in image_keys:
         # Unlike Example #1, per-step tensors here have no batch
         # dimension yet, so unsqueeze(0) adds one before concatenating.
         _images = torch.cat([e[k].unsqueeze(0) for e in states],
                             dim=0).float().to(device)
         _images = normalize_im(_images)
         images[k] = _images
     low_dim_states = torch.cat(
         [e["low_dim_states"].unsqueeze(0) for e in states],
         dim=0,
     ).float().to(device)
     social_vehicles = [
         e["social_vehicles"].float().to(device) for e in states
     ]
     out = {
         "images": images,
         "low_dim_states": low_dim_states,
         "social_vehicles": social_vehicles,
     }
     return out
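The only substantive difference from Example #1 is the unsqueeze(0) before concatenation: each per-step tensor has no batch dimension yet, so one is added first. That pattern is equivalent to torch.stack, as this self-contained sketch shows:

import torch

ts = [torch.rand(3, 64, 64) for _ in range(2)]  # per-step tensors, no batch dim

a = torch.cat([t.unsqueeze(0) for t in ts], dim=0)  # pattern used above
b = torch.stack(ts, dim=0)                          # equivalent one-liner
assert torch.equal(a, b) and a.shape == (2, 3, 64, 64)

The preprocess_state function below additionally assumes numpy (as np), collections.abc.Iterable, and the project-local helpers normalize_im, get_closest_waypoint, get_social_vehicles, and _normalize are in scope.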
def preprocess_state(
    state,
    state_description,
    convert_action_func,
    observation_num_lookahead,
    social_capacity,
    social_vehicle_config,
    prev_action,
    normalize=False,
    unsqueeze=False,
    device=None,
    draw=False,
):
    state = state.copy()  # shallow copy; top-level keys are modified below
    images = {}
    for k in state_description["images"]:
        image = torch.from_numpy(state[k])
        image = image.unsqueeze(0) if unsqueeze else image
        image = image.to(device) if device else image
        image = normalize_im(image) if normalize else image
        images[k] = image

    if "action" in state:
        state["action"] = convert_action_func(state["action"])

    # -------------------------------------
    # filter lookaheads from goal_path
    _, lookahead_wps = get_closest_waypoint(
        num_lookahead=observation_num_lookahead,
        goal_path=state["goal_path"],
        ego_position=state["ego_position"],
        ego_heading=state["heading"],
    )
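    # flatten the lookahead waypoints into a single feature vector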
    state["waypoints_lookahead"] = np.hstack(lookahead_wps)

    # -------------------------------------
    # keep prev_action
    state["action"] = prev_action

    # -------------------------------------
    # normalize states and concat
    normalized = [
        _normalize(key, state[key]) for key in state_description["low_dim_states"]
    ]

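    # wrap bare scalars as 1-element float32 arrays so that every entry
    # can be concatenated along the last axis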
    low_dim_states = [
        val if isinstance(val, Iterable) else np.asarray([val]).astype(np.float32)
        for val in normalized
    ]
    low_dim_states = torch.cat(
        [torch.from_numpy(e).float() for e in low_dim_states], dim=-1
    )
    low_dim_states = low_dim_states.unsqueeze(0) if unsqueeze else low_dim_states
    low_dim_states = low_dim_states.to(device) if device else low_dim_states

    # -------------------------------------
    # apply social vehicle encoder
    # only process if the state is not already encoded;
    # skipped entirely when social_capacity is 0
    state["social_vehicles"] = (
        get_social_vehicles(
            ego_vehicle_pos=state["ego_position"],
            ego_vehicle_heading=state["heading"],
            neighborhood_vehicles=state["social_vehicles"],
            social_vehicle_config=social_vehicle_config,
            waypoint_paths=state["waypoint_paths"],
        )
        if social_capacity > 0
        else []
    )
    # social_vehicle_dimension is falsy when the state description defines
    # no social-vehicle features; fall back to an empty tensor in that case
    social_vehicle_dimension = state_description["social_vehicles"]
    social_vehicles = torch.empty(0, 0)

    if social_vehicle_dimension:
        social_vehicles = torch.from_numpy(
            np.asarray(state["social_vehicles"])
        ).float()
        social_vehicles = social_vehicles.reshape((-1, social_vehicle_dimension))
    social_vehicles = social_vehicles.unsqueeze(0) if unsqueeze else social_vehicles
    social_vehicles = social_vehicles.to(device) if device else social_vehicles

    out = {
        "images": images,
        "low_dim_states": low_dim_states,
        "social_vehicles": social_vehicles,
    }
    return out
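The low-dim assembly step can be exercised in isolation. A minimal sketch, using made-up stand-ins for the output of _normalize (the values and shapes here are illustrative only):

import numpy as np
import torch
from collections.abc import Iterable

# Stand-ins for normalized low-dim features: a scalar and a small vector.
normalized = [0.5, np.array([0.1, -0.2], dtype=np.float32)]

low_dim = [
    v if isinstance(v, Iterable) else np.asarray([v]).astype(np.float32)
    for v in normalized
]
low_dim_states = torch.cat([torch.from_numpy(e).float() for e in low_dim], dim=-1)
assert low_dim_states.shape == (3,)  # the scalar contributes 1 entry, the vector 2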