Example #1
def poses_m_to_px(as_pose,
                  img_size_px,
                  world_size_px,
                  world_size_m,
                  batch_dim=False):
    """Convert pose(s) from world coordinates (meters) to map pixel coordinates."""
    world_size_px = np.asarray(world_size_px)
    pos = as_pose.position
    rot = as_pose.orientation

    # If the pose holds torch tensors, move them to CPU numpy arrays
    if hasattr(pos, "is_cuda"):
        pos = pos.data.cpu().numpy()
        rot = rot.data.cpu().numpy()

    if len(pos.shape) == 1:
        pos = pos[np.newaxis, :]

    # Project the (x, y) world position into pixel coordinates
    pos_img = pos_m_to_px(pos[:, 0:2], img_size_px, world_size_m,
                          world_size_px)

    # Extract the yaw angle from each orientation quaternion
    yaws = []
    if batch_dim:
        for i in range(rot.shape[0]):
            roll, pitch, yaw = euler.quat2euler(rot[i])
            yaws.append(yaw)
    else:
        roll, pitch, yaw = euler.quat2euler(rot)
        yaws.append(yaw)
        pos_img = pos_img[0]

    if batch_dim:
        # Add an extra axis so that orientation becomes Bx1 instead of just B
        out = Pose(pos_img, np.asarray(yaws)[:, np.newaxis])
    else:
        out = Pose(pos_img, yaws[0])

    return out
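
For reference, a minimal usage sketch of the converter above. The world and map sizes are illustrative assumptions (a 30 m world on a 32 px map), not values from the original repository; Pose and pos_m_to_px are the repo helpers the function already relies on:

import numpy as np
from transforms3d import euler

# Two poses in world coordinates: positions in meters plus quaternions.
positions = np.asarray([[1.0, 2.0, 0.0],
                        [4.5, 0.5, 0.0]])
quats = np.asarray([euler.euler2quat(0, 0, 0.3),
                    euler.euler2quat(0, 0, -1.2)])
batch_pose = Pose(positions, quats)

img_pose = poses_m_to_px(batch_pose,
                         img_size_px=32,
                         world_size_px=[32, 32],
                         world_size_m=30,
                         batch_dim=True)
print(img_pose.position)     # 2x2 array of pixel coordinates
print(img_pose.orientation)  # 2x1 array of yaw angles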
Example #2
 def dic_to_pose(self, dic_of_pose):
     """
     :param dic_of_pose: pose stored as a dictionary
     :return: Pose object
     """
     pose = Pose(torch.tensor(dic_of_pose['position']), torch.tensor(dic_of_pose['orientation']))
     return pose
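
Every example on this page revolves around the same lightweight Pose container from learning.inputs. The original class is not shown here; a minimal stand-in that supports the attribute access used in these snippets might look as follows (the to_torch helper is an assumption based on Examples #3 and #8):

import numpy as np
import torch

class Pose:
    """Minimal stand-in: a (position, orientation) pair.

    position is a point (or batch of points) in meters or pixels;
    orientation is a quaternion or yaw angle (or a batch thereof).
    """
    def __init__(self, position, orientation):
        self.position = position
        self.orientation = orientation

    def to_torch(self):
        # Assumed helper, mirroring the usage in Examples #3 and #8.
        return Pose(torch.as_tensor(np.asarray(self.position)),
                    torch.as_tensor(np.asarray(self.orientation)))

    def __repr__(self):
        return "Pose(pos={}, rot={})".format(self.position, self.orientation)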
Example #3
def runtest_fpv_to_global_map():
    img_to_map = FPVToGlobalMap(source_map_size=32,
                                world_size_px=32,
                                world_size=30,
                                img_w=256,
                                img_h=144,
                                res_channels=3,
                                map_channels=3,
                                img_dbg=True)

    import pickle
    import cv2
    with open(test_data_path(), "rb") as fp:
        test_data = pickle.load(fp)

    for i in range(len(test_data["images"])):
        image = test_data["images"][i]
        pose = test_data["cam_poses"][i]

        cv2.imshow("fpv_image", cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
        cv2.waitKey(1)

        image = standardize_image(image)
        image_t = Variable(torch.from_numpy(image))
        pose_t = pose.to_torch().to_var()
        pose_t = Pose(pose_t.position.unsqueeze(0),
                      pose_t.orientation.unsqueeze(0))
        image_t = image_t.unsqueeze(0)

        projected, poses = img_to_map(image_t, pose_t, None, show="yes")
        print("Ding")
        print("globalish poses: ", poses)
Example #4
 def get_drone_pose(self):
     drn_pos = self.get_pos_3d()
     drn_rot_euler = self.get_rot_euler()
     drn_rot_quat = euler.euler2quat(drn_rot_euler[0], drn_rot_euler[1],
                                     drn_rot_euler[2])
     pose = Pose(drn_pos, drn_rot_quat)
     return pose
Example #5
 def drone_poses_from_states(self, states):
     drn_pos = states[:, 0:3]
     drn_rot_euler = states[:, 3:6].detach().cpu().numpy()
     quats = [euler2quat(a[0], a[1], a[2]) for a in drn_rot_euler]
     quats = torch.from_numpy(np.asarray(quats)).to(drn_pos.device)
     pose = Pose(drn_pos, quats)
     return pose
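
The conversions above assume the transforms3d quaternion convention, where quaternions are ordered (w, x, y, z). A quick sanity check of that convention:

import math
import numpy as np
from transforms3d.euler import euler2quat, quat2euler

q = euler2quat(0.0, 0.0, math.pi / 2)  # roll, pitch, yaw -> (w, x, y, z)
print(np.round(q, 3))                  # [0.707 0.    0.    0.707]
roll, pitch, yaw = quat2euler(q)
print(round(yaw, 3))                   # 1.571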
Example #6
    def __init__(self, source_map_size, dest_map_size, world_size_px,
                 world_size_m):
        super(MapAffine, self).__init__()
        self.source_map_size_px = source_map_size
        self.dest_map_size_px = dest_map_size
        self.world_in_map_size_px = world_size_px
        self.world_size_m = world_size_m

        self.affine_2d = Affine2D()

        self.prof = SimpleProfiler(torch_sync=PROFILE, print=PROFILE)

        pos = np.asarray(
            [self.source_map_size_px / 2, self.source_map_size_px / 2])
        rot = np.asarray([0])
        self.canonical_pose_src = Pose(pos, rot)

        pos = np.asarray(
            [self.dest_map_size_px / 2, self.dest_map_size_px / 2])
        rot = np.asarray([0])
        self.canonical_pose_dst = Pose(pos, rot)
Example #7
    def cam_poses_from_states(self, states):
        cam_pos = states[:, 9:12]
        cam_rot = states[:, 12:16]

        pos_variance = 0
        rot_variance = 0
        if self.use_pos_noise:
            pos_variance = self.params["noisy_pos_variance"]
        if self.use_rot_noise:
            rot_variance = self.params["noisy_rot_variance"]

        pose = Pose(cam_pos, cam_rot)
        if self.use_pos_noise or self.use_rot_noise:
            pose = get_noisy_poses_torch(pose, pos_variance, rot_variance, cuda=self.is_cuda, cuda_device=self.cuda_device)
        return pose
Example #8
def provider_noisy_poses(segment_data, data):
    """
    This provider returns noisy poses of type learning.inputs.Pose
    These noisy poses are used during training to rotate the semantic map by a random angle before predicting visitation
    probabilities as a form of data augmentation.
    :param segment_data: list of per-timestep dicts with "state" and "metadata" entries
    :param data: unused
    :return: a list containing a single ("noisy_poses", noisy_poses_t) tuple
    """
    traj_len = len(segment_data)
    last_pos = None
    clean_poses = []

    model_params = P.get_current_parameters()["ModelPVN"]["Stage1"]
    use_first_pose = model_params["predict_in_start_frame"]

    seg_idx = -1
    first_step = 0
    for timestep in range(traj_len):

        if segment_data[timestep]["state"] is None:
            break
        if segment_data[timestep]["metadata"]["seg_idx"] != seg_idx:
            first_step = timestep
            seg_idx = segment_data[timestep]["metadata"]["seg_idx"]

        if use_first_pose:
            # X["state"] is a DroneState object
            pos_as = segment_data[first_step]["state"].state[9:12]
            rot_as = segment_data[first_step]["state"].state[12:16]
        else:
            pos_as = segment_data[timestep]["state"].state[9:12]
            rot_as = segment_data[timestep]["state"].state[12:16]

        clean_pose = Pose(pos_as, rot_as)
        clean_poses.append(clean_pose)

    params = P.get_current_parameters()["Data"]

    noisy_poses = get_noisy_poses_np(clean_poses, params["noisy_pos_variance"],
                                     params["noisy_rot_variance"])
    noisy_poses_t = noisy_poses.to_torch()

    return [("noisy_poses", noisy_poses_t)]
Example #9
def provider_start_poses(segment_data, data):
    traj_len = len(segment_data)
    start_poses = []

    seg_idx = -2
    for timestep in range(traj_len):
        if segment_data[timestep] is None:
            break
        if segment_data[timestep]["metadata"]["seg_idx"] != seg_idx:
            seg_idx = segment_data[timestep]["metadata"]["seg_idx"]
            pos_as = segment_data[timestep]["state"].state[9:12]
            rot_as = segment_data[timestep]["state"].state[12:16]
            start_pose = Pose(pos_as, rot_as)
        start_poses.append(start_pose)

    start_poses = stack_poses_np(start_poses)
    start_poses_t = start_poses.to_torch()

    return [("start_poses", start_poses_t)]
Example #10
def dyn_gt_test():
    presenter = Presenter()
    train_instr, dev_instr, test_instr, corpus = get_all_instructions()
    all_instr = {**train_instr, **dev_instr, **test_instr}

    for i in range(10):
        path = load_path(i)
        segments = all_instr[i][0]["instructions"]
        for seg in segments:
            start_idx = seg["start_idx"]
            end_idx = seg["end_idx"]
            randInt = random.randint(10, 100)

            start_pose = Pose(path[start_idx] - randInt, 0)

            if end_idx - start_idx > 0:
                randInt = random.randint(10, 100)
                new_path = get_dynamic_ground_truth(
                    path[start_idx:end_idx], (path[start_idx] - randInt))
                new_path1 = get_dynamic_ground_truth_smooth(
                    path[start_idx:end_idx], (path[start_idx] - randInt))
                presenter.plot_path(
                    i, [path[start_idx:end_idx], new_path, new_path1])
Example #11
 def cam_poses_from_states(self, states):
     cam_pos = states[:, 9:12]
     cam_rot = states[:, 12:16]
     pose = Pose(cam_pos, cam_rot)
     return pose
Example #12
    def get_canonical_frame_pose(self):
        pos = np.asarray([self.map_size / 2, self.map_size / 2])
        rot = np.asarray([0])

        return Pose(pos, rot)
Example #13
def cam_pose_from_state(state):
    cam_pos = state[9:12]
    cam_rot = state[12:16]
    pose = Pose(cam_pos, cam_rot)
    return pose
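
Examples #7, #11, #13 and #14 all slice the same flat drone state vector: indices 9:12 hold the camera position and 12:16 hold the camera orientation quaternion (the layout of the first nine entries is not documented on this page). A tiny usage sketch with a dummy state:

import numpy as np

state = np.zeros(16)
state[9:12] = [1.0, 2.0, 0.5]        # camera position (x, y, z)
state[12:16] = [1.0, 0.0, 0.0, 0.0]  # identity quaternion (w, x, y, z)

pose = cam_pose_from_state(state)
print(pose.position, pose.orientation)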
Example #14
 def get_cam_pose(self):
     cam_pos = self.state[9:12]
     cam_rot = self.state[12:16]
     pose = Pose(cam_pos, cam_rot)
     return pose
Example #15
def map_affine_test():
    img = load_env_img(2, 128, 128)
    img = standardize_image(img)
    img = torch.from_numpy(img).float().unsqueeze(0)

    pos = np.asarray([15, 15, 0])
    quat = euler.euler2quat(0, 0, 0)
    pose0 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    theta1 = 0.5
    pos = np.asarray([15, 15, 0])
    quat = euler.euler2quat(0, 0, theta1)
    pose1 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    D = 10.0
    pos = np.asarray([15 + D * math.cos(theta1), 15 + D * math.sin(theta1), 0])
    quat = euler.euler2quat(0, 0, theta1)
    pose2 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    # NOTE: the MapAffine constructor in Example #16 also takes world_size_m;
    # 30.0 here is an assumption borrowed from the world size in Example #3.
    affine = MapAffine(128, 128, 128, 30.0)
    res1 = affine(img, pose0, pose1)
    res2 = affine(res1, pose1, pose2)
    res3 = affine(img, pose0, pose2)

    prof = SimpleProfiler(torch_sync=True, print=True)
    affinebig = MapAffine(128, 256, 128, 30.0)
    prof.tick("init")
    res3big = affinebig(img, pose0, pose2)
    prof.tick("affinebig")

    img = load_env_img(2, 32, 32)
    img = standardize_image(img)
    img = torch.from_numpy(img).float().unsqueeze(0).cuda()
    affines = MapAffine(32, 64, 32, 30.0).cuda()
    torch.cuda.synchronize()
    prof.tick("init")
    res3s = affines(img, pose0, pose2)
    prof.tick("affines")

    prof.print_stats()

    print("Start pose: ", pose0)
    print("    Pose 1: ", pose1)
    print("    Pose 2: ", pose2)

    print("Res2, Res3 and Res3Big should align!")

    Presenter().show_image(img[0], "img", torch=True, waitkey=False, scale=2)
    Presenter().show_image(res1.data[0],
                           "res_1",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res2.data[0],
                           "res_2",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res3.data[0],
                           "res_3",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res3big.data[0],
                           "res3big",
                           torch=True,
                           waitkey=True,
                           scale=2)
Example #16
class MapAffine(nn.Module):
    # TODO: Cleanup unused run_params
    def __init__(self, source_map_size, dest_map_size, world_size_px,
                 world_size_m):
        super(MapAffine, self).__init__()
        self.source_map_size_px = source_map_size
        self.dest_map_size_px = dest_map_size
        self.world_in_map_size_px = world_size_px
        self.world_size_m = world_size_m

        self.affine_2d = Affine2D()

        self.prof = SimpleProfiler(torch_sync=PROFILE, print=PROFILE)

        pos = np.asarray(
            [self.source_map_size_px / 2, self.source_map_size_px / 2])
        rot = np.asarray([0])
        self.canonical_pose_src = Pose(pos, rot)

        pos = np.asarray(
            [self.dest_map_size_px / 2, self.dest_map_size_px / 2])
        rot = np.asarray([0])
        self.canonical_pose_dst = Pose(pos, rot)

    def pose_2d_to_mat_np(self, pose_2d, map_size, inv=False):
        pos = pose_2d.position
        yaw = pose_2d.orientation

        # Transform the img so that the drone's position ends up at the origin
        # TODO: Add batch support
        t1 = get_affine_trans_2d(-pos)

        # Rotate the img so that it's aligned with the drone's orientation
        yaw = -yaw
        t2 = get_affine_rot_2d(-yaw)

        # Translate the img so that it's centered around the drone
        t3 = get_affine_trans_2d([map_size / 2, map_size / 2])

        mat = np.dot(t3, np.dot(t2, t1))

        # Swap x and y axes (because of the BxCxHxW a.k.a BxCxYxX convention)
        swapmat = mat[[1, 0, 2], :]
        mat = swapmat[:, [1, 0, 2]]

        if inv:
            mat = np.linalg.inv(mat)

        return mat

    def poses_2d_to_mat_np(self, pose_2d, map_size, inv=False):
        pos = np.asarray(pose_2d.position)
        yaw = np.asarray(pose_2d.orientation)

        # Transform the img so that the drone's position ends up at the origin
        # TODO: Add batch support
        t1 = get_affine_trans_2d(-pos, batch=True)

        # Rotate the img so that it's aligned with the drone's orientation
        yaw = -yaw
        t2 = get_affine_rot_2d(-yaw, batch=True)

        # Translate the img so that it's centered around the drone
        t3 = get_affine_trans_2d(np.asarray([map_size / 2, map_size / 2]),
                                 batch=False)

        t21 = np.matmul(t2, t1)
        mat = np.matmul(t3, t21)

        # Swap x and y axes (because of the BxCxHxW a.k.a BxCxYxX convention)
        swapmat = mat[:, [1, 0, 2], :]
        mat = swapmat[:, :, [1, 0, 2]]

        if inv:
            mat = np.linalg.inv(mat)

        return mat

    def get_old_to_new_pose_mat(self, old_pose, new_pose):
        old_T_inv = self.pose_2d_to_mat_np(old_pose,
                                           self.source_map_size_px,
                                           inv=True)
        new_T = self.pose_2d_to_mat_np(new_pose,
                                       self.dest_map_size_px,
                                       inv=False)
        mat = np.dot(new_T, old_T_inv)
        #mat = new_T
        mat_t = np_to_tensor(mat, cuda=False)
        return mat_t

    def get_old_to_new_pose_matrices(self, old_pose, new_pose):
        old_T_inv = self.poses_2d_to_mat_np(old_pose,
                                            self.source_map_size_px,
                                            inv=True)
        new_T = self.poses_2d_to_mat_np(new_pose,
                                        self.dest_map_size_px,
                                        inv=False)
        mat = np.matmul(new_T, old_T_inv)
        #mat = new_T
        mat_t = np_to_tensor(mat, insert_batch_dim=False, cuda=False)
        return mat_t

    def get_affine_matrices(self, map_poses, cam_poses, batch_size):
        # Convert the pose from airsim coordinates to the image pixel coordinates
        # If the pose is None, use the canonical pose (global frame)
        if map_poses is not None:
            # TODO: Check if we're gonna have a list here or something
            map_poses = map_poses.numpy()
            # TODO: This is the big bottleneck. Could we precompute it in the dataloader?
            map_poses_img = poses_m_to_px(
                map_poses,
                self.source_map_size_px,
                [self.world_in_map_size_px, self.world_in_map_size_px],
                self.world_size_m,
                batch_dim=True)
        else:
            map_poses_img = self.canonical_pose_src.repeat_np(batch_size)

        if cam_poses is not None:
            cam_poses = cam_poses.numpy()
            cam_poses_img = poses_m_to_px(
                cam_poses,
                self.dest_map_size_px,
                [self.world_in_map_size_px, self.world_in_map_size_px],
                self.world_size_m,
                batch_dim=True)
        else:
            cam_poses_img = self.canonical_pose_dst.repeat_np(batch_size)

        # Get the affine transformation matrix to transform the map to the new camera pose
        affines = self.get_old_to_new_pose_matrices(map_poses_img,
                                                    cam_poses_img)

        return affines

    def get_affine_i(self, map_poses, cam_poses, i):
        # Convert the pose from airsim coordinates to the image pixel coordinates
        # If the pose is None, use the canonical pose (global frame)
        self.prof.tick("call")
        if map_poses is not None and map_poses[i] is not None:
            map_pose_i = map_poses[i].numpy()
            map_pose_img = poses_m_to_px(
                map_pose_i, self.source_map_size_px,
                [self.world_in_map_size_px, self.world_in_map_size_px],
                self.world_size_m)
        else:
            map_pose_img = self.canonical_pose_src

        if cam_poses is not None and cam_poses[i] is not None:
            cam_pose_i = cam_poses[i].numpy()
            cam_pose_img = poses_m_to_px(
                cam_pose_i, self.dest_map_size_px,
                [self.world_in_map_size_px, self.world_in_map_size_px],
                self.world_size_m)
        else:
            cam_pose_img = self.canonical_pose_dst

        self.prof.tick("convert_pose")

        # Get the affine transformation matrix to transform the map to the new camera pose
        affine_i = self.get_old_to_new_pose_mat(map_pose_img, cam_pose_img)

        self.prof.tick("calc_affine")
        return affine_i

    def forward(self, maps, current_poses, new_poses):
        """
        Affine transform the map from being centered around map_pose in the canonocial map frame to
        being centered around cam_pose in the canonical map frame.
        Canonical map frame is the one where the map origin aligns with the environment origin, but the env may
        or may not take up the entire map.
        :param map: map centered around the drone in map_pose
        :param current_poses: the previous drone pose in canonical map frame
        :param new_poses: the new drone pose in canonical map frame
        :return:
        """

        # TODO: Handle the case where cam_pose is None and return a map in the canonical frame
        self.prof.tick("out")
        batch_size = maps.size(0)

        self.prof.tick("init")

        affine_matrices_cpu = self.get_affine_matrices(current_poses,
                                                       new_poses, batch_size)

        self.prof.tick("affine_mat_and_pose")

        # Apply the affine transformation on the map
        # The affine matrices should be on CPU (if not, they'll be copied to CPU anyway!)
        # CUDA SYNC point:
        maps_out = self.affine_2d(
            maps,
            affine_matrices_cpu,
            out_size=[self.dest_map_size_px, self.dest_map_size_px])

        self.prof.tick("affine_2d")
        self.prof.loop()
        if batch_size > 1:
            self.prof.print_stats(20)

        return maps_out
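
A hedged usage sketch for MapAffine. Pose.numpy() and repeat_np() are repo helpers this page does not show, and world_size_m=30 is borrowed from Example #3; treat this as an illustration of the call shape rather than a verified run:

import numpy as np
import torch

affine = MapAffine(source_map_size=32, dest_map_size=32,
                   world_size_px=32, world_size_m=30)
maps = torch.zeros(1, 3, 32, 32)

# One-pose batches: move the map as if the drone advanced 1 m along x.
cur = Pose(np.asarray([[1.0, 1.0, 0.0]]), np.asarray([[1.0, 0.0, 0.0, 0.0]]))
new = Pose(np.asarray([[2.0, 1.0, 0.0]]), np.asarray([[1.0, 0.0, 0.0, 0.0]]))

maps_out = affine(maps, cur, new)
print(maps_out.shape)  # expected: torch.Size([1, 3, 32, 32])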