Example #1
import torch

def spatial_transform(map_scale, p, dx, invert=False):
    """
    Applies the transformation dx to the map p.
    Inputs:
        p - (bs, 2, H, W) map
        dx - (bs, 3) egocentric transformation --- (dx, dy, dtheta)

    Conventions:
        The origin is at the center of the map.
        X points upward, along the agent's forward direction.
        Y points rightward, along the agent's rightward direction.

    Note: dx denotes a transform of the agent's position, not of the image
    directly. For example, if the agent moves upward, the map contents move
    downward. To disable this behavior, set invert=False (the default).
    """
    s = map_scale
    # Convert dx to the map image coordinate system, where X is rightward
    # and Y is downward; the rotation dx[:, 2] is anti-clockwise
    dx_map = torch.stack([(dx[:, 1] / s), -(dx[:, 0] / s), dx[:, 2]], dim=1)

    p_trans = spatial_transform_map(p, dx_map, invert=invert)

    return p_trans
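
A minimal call might look like the sketch below (the map size, scale, and motion values are invented for illustration, and spatial_transform_map from the project is assumed to be in scope):

import torch

# Hypothetical inputs: one 2-channel 101x101 map at 0.05 m/cell
p = torch.zeros(1, 2, 101, 101)
p[:, :, 50, 60] = 1.0  # mark a single cell

# Agent moves 0.5 m forward, 0 m rightward, with no rotation
dx = torch.tensor([[0.5, 0.0, 0.0]])
p_trans = spatial_transform(0.05, p, dx)  # still (1, 2, 101, 101)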
Example #2
    def _load_transformed_wall_maps(self, scene_map_info, episode):
        seen_maps = []
        wall_maps = []
        start_position = episode.start_position  # (X, Y, Z)
        start_rotation = quaternion_xyzw_to_wxyz(episode.start_rotation)
        start_heading = compute_heading_from_quaternion(start_rotation)
        for floor_data in scene_map_info:
            seen_map = np.load(floor_data["seen_map_path"])
            wall_map = np.load(floor_data["wall_map_path"])
            # ===== Transform the maps relative to the episode start pose =====
            map_view_position = floor_data["world_position"]
            map_view_heading = floor_data["world_heading"]
            # Originally, Z points downward and X points rightward.
            # Convert to the map convention: X upward and Y rightward.
            x_map, y_map = -map_view_position[2], map_view_position[0]
            theta_map = map_view_heading
            x_start, y_start = -start_position[2], start_position[0]
            theta_start = start_heading
            # Compute relative coordinates
            r_rel = math.sqrt((x_start - x_map) ** 2 + (y_start - y_map) ** 2)
            phi_rel = math.atan2(y_start - y_map, x_start - x_map) - theta_map
            x_rel = r_rel * math.cos(phi_rel) / self.config.MAP_SCALE
            y_rel = r_rel * math.sin(phi_rel) / self.config.MAP_SCALE
            theta_rel = theta_start - theta_map
            # Convert these to image coordinates with X being rightward and Y
            # being downward
            x_img_rel = y_rel
            y_img_rel = -x_rel
            theta_img_rel = theta_rel
            x_trans = torch.Tensor([[x_img_rel, y_img_rel, theta_img_rel]])
            # Perform the transformations
            p_seen_map = rearrange(torch.Tensor(seen_map), "h w c -> () c h w")
            p_wall_map = rearrange(torch.Tensor(wall_map), "h w c -> () c h w")
            p_seen_map_trans = spatial_transform_map(p_seen_map, x_trans)
            p_wall_map_trans = spatial_transform_map(p_wall_map, x_trans)
            seen_map_trans = asnumpy(p_seen_map_trans)
            seen_map_trans = rearrange(seen_map_trans, "() c h w -> h w c")
            wall_map_trans = asnumpy(p_wall_map_trans)
            wall_map_trans = rearrange(wall_map_trans, "() c h w -> h w c")
            seen_maps.append(seen_map_trans)
            wall_maps.append(wall_map_trans)

        return seen_maps, wall_maps
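
The polar-coordinate step above (r_rel, phi_rel) rotates the world-frame offset between the map pose and the episode start pose into the map's frame, then expresses it in map cells. Pulled out on its own, it looks like this (relative_pose is an illustrative helper, not a function from the repo):

import math

def relative_pose(x_map, y_map, theta_map, x_start, y_start, theta_start, map_scale):
    # World-frame offset from the map pose to the start pose
    r_rel = math.sqrt((x_start - x_map) ** 2 + (y_start - y_map) ** 2)
    # Bearing of that offset, measured in the map's rotated frame
    phi_rel = math.atan2(y_start - y_map, x_start - x_map) - theta_map
    # Offset in map cells, plus the relative heading
    x_rel = r_rel * math.cos(phi_rel) / map_scale
    y_rel = r_rel * math.sin(phi_rel) / map_scale
    return x_rel, y_rel, theta_start - theta_map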
Example #3
File: ans.py Project: aimagelab/LoCoNav
    def _compute_local_map_crop(self, global_map, global_pose):
        local_crop_size = self.config.LOCAL_POLICY.embed_map_size
        # Crop a larger region first so the final crop stays fully covered
        # after rotation (1.5 > sqrt(2))
        exp_crop_size = int(1.5 * local_crop_size)
        cropped_map = crop_map(
            global_map, self.states["curr_map_position"], exp_crop_size
        )
        global_heading = global_pose[:, 2]  # (bs, ) pose in radians
        # Rotation-only transform: (dx, dy, dtheta) = (0, 0, heading)
        rotation_params = torch.stack(
            [
                torch.zeros_like(global_heading),
                torch.zeros_like(global_heading),
                global_heading,
            ],
            dim=1,
        )
        rotated_map = spatial_transform_map(cropped_map, rotation_params)
        # Crop the final local window around the map center
        center_locs = torch.zeros_like(self.states["curr_map_position"])
        center_locs[:, 0] = rotated_map.shape[3] // 2  # x = W // 2
        center_locs[:, 1] = rotated_map.shape[2] // 2  # y = H // 2
        rotated_map = crop_map(rotated_map, center_locs, local_crop_size)
        return rotated_map
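
A shape-level sketch of the rotate-then-crop pattern above (sizes are invented; crop_map and spatial_transform_map are the same project helpers, assumed to take (x, y) center positions and (bs, 3) transform parameters as in the method):

import math
import torch

bs, C, H, W = 2, 2, 240, 240
global_map = torch.rand(bs, C, H, W)
heading = torch.tensor([0.0, math.pi / 2])  # (bs, ) radians

# Rotation-only transform: (dx, dy, dtheta) = (0, 0, heading)
rot = torch.stack([torch.zeros(bs), torch.zeros(bs), heading], dim=1)
rotated = spatial_transform_map(global_map, rot)

# Crop a 101x101 window around the map center; positions are (x, y)
centers = torch.tensor([[W // 2, H // 2]] * bs)
local = crop_map(rotated, centers, 101)  # -> (bs, C, 101, 101)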