Code example #1
class TopDownToEgoImg(MapTransformerBase):
    def __init__(self, img_in_size=256, world_size=32):
        super(TopDownToEgoImg, self).__init__(img_in_size, world_size)
        self.is_cuda = False
        self.cuda_device = None

        # Downsample images 8x (average pooling in place of a resnet) to get a feature map
        self.feature_net = nn.AvgPool2d(8, stride=8)

        self.map_affine = MapAffine(source_map_size=img_in_size,
                                    world_in_map_size=world_size)
Code example #2
class MapTransformerBase(CudaModule):
    def __init__(self, map_size, world_size):
        super(MapTransformerBase, self).__init__()

        self.latest_maps = None
        self.latest_map_poses = None
        self.latest_map = None
        self.latest_map_pose = None

        self.map_affine = MapAffine(source_map_size=map_size,
                                    world_in_map_size=world_size)
Code example #3
class MapTransformerBase(CudaModule):
    def __init__(self, source_map_size, world_size, dest_map_size=None):
        super(MapTransformerBase, self).__init__()

        if dest_map_size is None:
            dest_map_size = source_map_size

        self.latest_maps = None
        self.latest_map_poses = None

        self.map_affine = MapAffine(source_map_size=source_map_size,
                                    dest_map_size=dest_map_size,
                                    world_in_map_size=world_size)
Code example #4
class TopDownToEgoImg(MapTransformerBase):
    def __init__(self, img_in_size=256, world_size_px=32):
        super(TopDownToEgoImg, self).__init__(img_in_size,
                                              world_size_px,
                                              world_size_m=world_size_px)
        self.is_cuda = False
        self.cuda_device = None

        # Downsample images 8x (average pooling in place of a resnet) to get a feature map
        self.feature_net = nn.AvgPool2d(8, stride=8)

        self.map_affine = MapAffine(source_map_size=img_in_size,
                                    world_size_px=world_size_px)

    def cuda(self, device=None):
        MapTransformerBase.cuda(self, device)
        self.is_cuda = True
        self.cuda_device = device
        self.map_affine.cuda(device)
        return self

    def init_weights(self):
        pass

    def forward(self, image_g, pose):
        self.set_map(image_g, None)
        image_r, _ = self.get_map(pose)

        presenter = Presenter()
        presenter.show_image(image_g[0].data,
                             "img_g",
                             torch=True,
                             waitkey=False,
                             scale=2)
        presenter.show_image(image_r[0].data,
                             "img_r",
                             torch=True,
                             waitkey=100,
                             scale=2)

        features_r = self.feature_net(image_r)
        coverage = torch.ones_like(features_r)
        return features_r, coverage, image_r
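
A minimal usage sketch for the module above (not from the source project; image_g and pose follow the conventions of the test in code example #7, and the tensor shape is an assumption):

# Hypothetical driver code: transform a global top-down image into the
# robot's egocentric frame and pool it into a coarse feature map.
model = TopDownToEgoImg(img_in_size=256, world_size_px=32)
features_r, coverage, image_r = model(image_g, pose)  # image_g: [1, 3, 256, 256]
# With 8x8 average pooling, features_r is 32x32 spatially (256 / 8).
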
Code example #5
    def __init__(self,
                 source_map_size,
                 world_size_px,
                 world_size_m,
                 dest_map_size=None):
        super(MapTransformer, self).__init__()

        if dest_map_size is None:
            dest_map_size = source_map_size

        self.map_affine = MapAffine(source_map_size=source_map_size,
                                    dest_map_size=dest_map_size,
                                    world_size_px=world_size_px,
                                    world_size_m=world_size_m)
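
This variant is the later API: it distinguishes the world extent in pixels (world_size_px) from the extent in meters (world_size_m), where the earlier constructors above used a single world_in_map_size / world_size argument. A minimal instantiation sketch (the numeric values are assumptions, not from the source):

# Hypothetical values: a 32px source map resampled to 64px, covering a
# world that is 32px wide and 30.0m across in metric units.
transformer = MapTransformer(source_map_size=32,
                             world_size_px=32,
                             world_size_m=30.0,
                             dest_map_size=64)
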
Code example #6
class MapTransformerBase(CudaModule):

    # TODO: Standardize run_params
    # TODO: Remove latest_map and only leave latest_maps
    # TODO: Refactor this entire getting/setting idea
    def __init__(self, map_size, world_size):
        super(MapTransformerBase, self).__init__()

        self.latest_maps = None
        self.latest_map_poses = None
        self.latest_map = None
        self.latest_map_pose = None

        self.map_affine = MapAffine(source_map_size=map_size,
                                    world_in_map_size=world_size)

    def init_weights(self):
        pass

    def reset(self):
        self.latest_maps = None
        self.latest_map_poses = None
        self.latest_map = None
        self.latest_map_pose = None

    def cuda(self, device=None):
        CudaModule.cuda(self, device)
        self.map_affine.cuda(device)
        return self

    def get_map(self, cam_pose=None, show=""):
        """
        Return the latest map that's been accumulated.
        :param cam_pose: The map will be oriented in the frame of reference of cam_pose before returning
        :return: the latest map transformed into the frame of cam_pose, and that pose
        """
        # Old single-map implementation, kept for reference:
        # if not self.latest_map_pose == cam_pose:
        #     map_in_current_frame = self.map_affine(self.latest_map, self.latest_map_pose, cam_pose)
        #     if show != "":
        #         Presenter().show_image(map_in_current_frame.data[0, 0:3], show, torch=True, scale=8, waitkey=20)
        #     return map_in_current_frame, cam_pose
        # else:
        #     return self.latest_map, self.latest_map_pose
        maps, poses = self.get_maps([cam_pose])
        # Slice rather than index so the batch dimension is preserved
        return maps[maps.size(0) - 1:maps.size(0)], poses[-1]

    def get_maps(self, cam_poses):
        """
        Return the latest sequence of maps that's been stored.
        :param cam_poses: Each map in the batch will be oriented in the frame of reference of cam_pose_i before returning
        :return: the stored maps, each oriented in the frame of cam_poses[i], and cam_poses
        """
        #maps = []
        ## TODO: Add proper batch support to map_affine
        #for i, cam_pose in enumerate(cam_poses):
        #    if cam_pose == self.latest_map_poses[i]:
        #        maps.append(self.latest_maps[i])
        #    else:
        #        map_i_in_pose_i = self.map_affine(self.latest_maps[i:i+1], self.latest_map_poses[i:i+1], cam_pose)
        #        maps.append(map_i_in_pose_i)

        maps = self.map_affine(self.latest_maps, self.latest_map_poses,
                               cam_poses)
        return maps, cam_poses

    def set_map(self, map, pose):
        self.latest_maps = map
        self.latest_map_poses = [pose]

    def set_maps(self, maps, poses):
        self.latest_maps = maps
        self.latest_map_poses = poses
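
The getter/setter protocol above boils down to: store a batch of maps with the poses they were captured in, then request them re-oriented into new frames of reference. A minimal sketch, assuming maps is a [B, C, H, W] tensor and the poses are Pose objects as in the test below:

# Hypothetical usage of the set/get protocol (not from the source project)
tf = MapTransformerBase(map_size=32, world_size=32)
tf.set_maps(maps, poses)                        # store maps in their own frames
maps_in_new_frames, _ = tf.get_maps(new_poses)  # re-orient map i into new_poses[i]
tf.reset()                                      # clear the stored state
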
Code example #7
File: map_affine_tests.py  Project: hyzcn/drif
import math

import numpy as np
import torch
from transforms3d import euler

# Project-internal helpers from the drif repo (import paths omitted):
# load_env_img, standardize_image, Pose, MapAffine, SimpleProfiler, Presenter


def map_affine_test():
    img = load_env_img(2, 128, 128)
    img = standardize_image(img)
    img = torch.from_numpy(img).float().unsqueeze(0)

    pos = np.asarray([15, 15, 0])
    quat = euler.euler2quat(0, 0, 0)
    pose0 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    theta1 = 0.5
    pos = np.asarray([15, 15, 0])
    quat = euler.euler2quat(0, 0, theta1)
    pose1 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    D = 10.0
    pos = np.asarray([15 + D * math.cos(theta1), 15 + D * math.sin(theta1), 0])
    quat = euler.euler2quat(0, 0, theta1)
    pose2 = Pose(pos[np.newaxis, :], quat[np.newaxis, :])

    affine = MapAffine(128, 128, 128)
    res1 = affine(img, pose0, pose1)
    res2 = affine(res1, pose1, pose2)
    res3 = affine(img, pose0, pose2)

    prof = SimpleProfiler(torch_sync=True, print=True)
    affinebig = MapAffine(128, 256, 128)
    prof.tick("init")
    res3big = affinebig(img, pose0, pose2)
    prof.tick("affinebig")

    img = load_env_img(2, 32, 32)
    img = standardize_image(img)
    img = torch.from_numpy(img).float().unsqueeze(0).cuda()
    affines = MapAffine(32, 64, 32).cuda()
    torch.cuda.synchronize()
    prof.tick("init")
    res3s = affines(img, pose0, pose2)
    prof.tick("affines")

    prof.print_stats()

    print("Start pose: ", pose0)
    print("    Pose 1: ", pose1)
    print("    Pose 2: ", pose2)

    print("Res2, Res3 and Res3Big should align!")

    Presenter().show_image(img[0], "img", torch=True, waitkey=False, scale=2)
    Presenter().show_image(res1.data[0],
                           "res_1",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res2.data[0],
                           "res_2",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res3.data[0],
                           "res_3",
                           torch=True,
                           waitkey=False,
                           scale=2)
    Presenter().show_image(res3big.data[0],
                           "res3big",
                           torch=True,
                           waitkey=True,
                           scale=2)