Example #1
def projective_forward_sphere(src_images, intrinsics, tgt_pose_rt, tgt_pos,
                              depths):
    """Project from a position within the sphere sweep volume to an equirect.
    Args:
      src_images: [layers, batch, height, width, channels]
      tgt_pose_rt: [batch, 4, 4] in the form of [R, t]
      tgt_pos: [batch, 3, 1]
      depths: [layers, batch]
    Returns:
      proj_src_images: [layers, batch, height, width, channels]
    """

    n_layers, n_batch, height, width, channels = src_images.get_shape(
    ).as_list()

    # For each batch element, find where every sphere sweep layer intersects
    # the target view.
    pixel_coords = []
    for i in range(n_batch):
        pixels = spherical.intersect_sphere(tgt_pose_rt[i], tgt_pos[i],
                                            depths[:, i], n_layers, n_batch,
                                            width, height)
        pixel_coords.append(pixels)
    # [batch, layers, height, width, 2] -> [layers, batch, height, width, 2]
    pixel_coords = tf.stack(pixel_coords, axis=0)
    pixel_coords = tf.transpose(pixel_coords, [1, 0, 2, 3, 4])

    # Bilinearly resample each layer at its projected coordinates.
    proj_images = []
    for i in range(n_layers):
        resampled = sampling.bilinear_wrapper2(src_images[i], pixel_coords[i])
        proj_images.append(resampled)

    proj_src_images = tf.stack(proj_images, axis=0)

    return proj_src_images
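
The per-batch loop and the stack/transpose pair above are pure shape bookkeeping. Below is a throwaway sketch of that reordering under TensorFlow 2, with a hypothetical fake_intersect_sphere standing in for the repository-local spherical.intersect_sphere; all sizes are made up.

import tensorflow as tf

n_layers, n_batch, height, width = 8, 2, 16, 32

def fake_intersect_sphere(n_layers, height, width):
    # Stand-in returning per-layer pixel coordinates, [layers, height, width, 2].
    return tf.random.uniform([n_layers, height, width, 2])

pixel_coords = []
for i in range(n_batch):
    pixel_coords.append(fake_intersect_sphere(n_layers, height, width))
# Stack over batch, then move layers to the front:
# [batch, layers, height, width, 2] -> [layers, batch, height, width, 2]
pixel_coords = tf.stack(pixel_coords, axis=0)
pixel_coords = tf.transpose(pixel_coords, [1, 0, 2, 3, 4])
print(pixel_coords.shape)  # (8, 2, 16, 32, 2)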
Example #2
def projective_forward_sphere_to_perspective(src_images,
                                             intrinsics,
                                             tgt_pose_rt,
                                             tgt_pos,
                                             depths,
                                             viewing_window=3,
                                             tgt_height=320,
                                             tgt_width=640):
    """Project from a position within the sphere sweep volume to a perspective image.
    Args:
      src_images: [layers, batch, height, width, channels]
      tgt_pose_rt: [batch, 4, 4] in the form of [R, t]
      tgt_pos: [batch, 3, 1]
      depths: [layers, batch]
      viewing_direction: 3 default gives the central crop..
    Returns:
      proj_src_images: [layers, batch, height, width, channels]
    """

    n_layers, n_batch, src_height, src_width, channels = src_images.get_shape(
    ).as_list()
    pixel_coords = []

    # Build a rotation-only pose: rotate by viewing_window * 90 degrees around
    # the y-axis (270 degrees when viewing_window is 3) to crop the center
    # view. Note that this overrides the tgt_pose_rt argument.
    angles = [[0., viewing_window * np.pi / 2., 0.]]
    rot = tfgt.rotation_matrix_3d.from_euler(angles)
    tr = tf.zeros([1, 3, 1])
    tgt_pose_rt = tf.concat([rot, tr], axis=2)
    tgt_pose_rt = tf.concat(
        [tgt_pose_rt, tf.expand_dims(tf.eye(4), 0)[:, 3:, :]], axis=1)
    tgt_pose_rt = tf.tile(tgt_pose_rt, [n_batch, 1, 1])

    for i in range(n_batch):
        pixels = spherical.intersect_perspective(tgt_pose_rt[i], tgt_pos[i],
                                                 depths[:, i], n_layers,
                                                 n_batch, src_width,
                                                 src_height, tgt_width,
                                                 tgt_height, intrinsics)
        pixel_coords.append(pixels)
    # [batch, layers, tgt_height, tgt_width, 2] ->
    # [layers, batch, tgt_height, tgt_width, 2]
    pixel_coords = tf.stack(pixel_coords, axis=0)
    pixel_coords = tf.transpose(pixel_coords, [1, 0, 2, 3, 4])

    proj_images = []
    for i in range(n_layers):
        resampled = sampling.bilinear_wrapper2(src_images[i], pixel_coords[i])
        proj_images.append(resampled)
    proj_src_images = tf.stack(proj_images, axis=0)
    return proj_src_images
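
The function above builds the target pose directly from Euler angles. The following minimal sketch replays just that [R | t] assembly, assuming TensorFlow 2 and tensorflow_graphics (the original's tfgt is assumed to be tensorflow_graphics.geometry.transformation); the batch size and angle are made up.

import numpy as np
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d

viewing_window = 3
n_batch = 2

# Rotation about the y-axis by viewing_window * 90 degrees (270 here).
angles = tf.constant([[0., viewing_window * np.pi / 2., 0.]])
rot = rotation_matrix_3d.from_euler(angles)   # [1, 3, 3]
tr = tf.zeros([1, 3, 1])                      # zero translation
pose = tf.concat([rot, tr], axis=2)           # [1, 3, 4] = [R | t]
pose = tf.concat(
    [pose, tf.expand_dims(tf.eye(4), 0)[:, 3:, :]], axis=1)  # append [0, 0, 0, 1]
pose = tf.tile(pose, [n_batch, 1, 1])         # [batch, 4, 4]
print(pose.shape)  # (2, 4, 4)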
Example #3
def sweep_one(image, order, depths, pose, intrinsics, st_fun, backproj_fun,
              proj_fun):
    """Builds a plane sweep volume for one source image.

    image is [batch, height, width, channels], pose is [batch, 4, 4], and
    intrinsics holds a 3x3 matrix per batch element; depths is a list or a 1-D
    tensor of num_planes values. st_fun, backproj_fun and proj_fun are
    callables supplied by the caller. Returns the source image resampled at
    every depth plane, packed as [batch, height, width, channels * num_planes].
    """

    batch, height, width, channels = image.get_shape().as_list()

    if isinstance(depths, list):
        num_planes = len(depths)
    else:
        num_planes, = depths.get_shape().as_list()

    # Construct S, T
    S, T = st_fun([height, width])

    # Backproject points into reference frame MPI
    all_resampled = []
    for i in range(batch):
        # Pose and intrinsics: zero-pad the 3x3 intrinsics to 4x4, then tile
        # both intrinsics and pose so there is one copy per depth plane.
        intrinsic = tf.slice(intrinsics, [i, 0, 0], [1, 3, 3])
        intrinsic = tf.concat(
            [intrinsic, tf.zeros([1, 1, 3], tf.float32)], axis=1)
        intrinsic = tf.concat(
            [intrinsic, tf.zeros([1, 4, 1], tf.float32)], axis=2)
        intrinsic_tiled = tf.tile(intrinsic, [num_planes, 1, 1])
        pose_one = tf.slice(pose, [i, 0, 0], [1, 4, 4])
        pose_tiled = tf.tile(pose_one, [num_planes, 1, 1])

        points = backproj_fun(S, T, tf.convert_to_tensor(depths, tf.float32),
                              intrinsic_tiled)

        # Apply pose
        points = apply_pose(points, pose_tiled)

        # Project points into source frame
        pixel_coords = proj_fun(points, order, pose_tiled, intrinsic_tiled,
                                width, height)

        # Resample the tiled source image at every plane's pixel coordinates.
        image_one = tf.slice(image, [i, 0, 0, 0], [1, height, width, channels])
        image_tiled = tf.tile(image_one, [num_planes, 1, 1, 1])
        resampled = sampling.bilinear_wrapper2(image_tiled, pixel_coords)
        # [num_planes, height, width, channels] ->
        # [height, width, num_planes, channels]
        resampled = tf.transpose(resampled, [1, 2, 0, 3])

        all_resampled.append(resampled)

    all_resampled = tf.stack(all_resampled)
    resampled = tf.reshape(all_resampled,
                           [batch, height, width, channels * num_planes])
    return resampled
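
The intrinsics bookkeeping inside the per-batch loop can be exercised on its own. A small sketch with made-up values (TensorFlow 2 only, not part of the repository):

import tensorflow as tf

intrinsics = tf.constant([[[500., 0., 320.],
                           [0., 500., 160.],
                           [0., 0., 1.]]])     # [batch=1, 3, 3]
num_planes = 32

intrinsic = tf.slice(intrinsics, [0, 0, 0], [1, 3, 3])
intrinsic = tf.concat(
    [intrinsic, tf.zeros([1, 1, 3], tf.float32)], axis=1)   # [1, 4, 3]
intrinsic = tf.concat(
    [intrinsic, tf.zeros([1, 4, 1], tf.float32)], axis=2)   # [1, 4, 4]
intrinsic_tiled = tf.tile(intrinsic, [num_planes, 1, 1])    # one copy per plane
print(intrinsic_tiled.shape)  # (32, 4, 4)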
Example #4
def sweep_one(image, order, depths, pose, intrinsics, st_fun, backproj_fun,
              proj_fun, pro2=False):
    """Variant of sweep_one with an optional pro2 mode, in which image, pose,
    and intrinsics carry a per-example dimension of 6 views whose resampled
    results are composited with a coordinate-validity mask."""

    if pro2:
        batch, _, height, width, channels = image.get_shape().as_list()
    else:
        batch, height, width, channels = image.get_shape().as_list()

    if isinstance(depths, list):
        num_planes = len(depths)
    else:
        num_planes, = depths.get_shape().as_list()

    # Construct S, T
    S, T = st_fun([height, width])

    # Backproject points into reference frame MPI
    all_resampled = []
    for i in range(batch):
        # Pose and intrinsics
        if not pro2:
            intrinsic = tf.slice(intrinsics, [i, 0, 0], [1, 3, 3])
            intrinsic = tf.concat(
                [intrinsic, tf.zeros([1, 1, 3], tf.float32)], axis=1)
            intrinsic = tf.concat(
                [intrinsic, tf.zeros([1, 4, 1], tf.float32)], axis=2)
            intrinsic_tiled = tf.tile(intrinsic, [num_planes, 1, 1])
            pose_one = tf.slice(pose, [i, 0, 0], [1, 4, 4])
            pose_tiled = tf.tile(pose_one, [num_planes, 1, 1])
        else:
            intrinsic_tiled = tf.slice(intrinsics, [i, 0, 0], [1, 6, 15])
            pose_tiled = tf.slice(pose, [i, 0, 0, 0], [1, 6, 4, 4])

        points = backproj_fun(S, T, tf.convert_to_tensor(
            depths, tf.float32), intrinsic_tiled)

        # Apply pose
        if not pro2:
            points = apply_pose(points, pose_tiled)

        # Project points into source frame
        pixel_coords = proj_fun(points, order, pose_tiled,
                                intrinsic_tiled, width, height)

        # Resample
        if not pro2:
            image_one = tf.slice(image, [i, 0, 0, 0],
                                 [1, height, width, channels])
            image_tiled = tf.tile(image_one, [num_planes, 1, 1, 1])
            resampled = sampling.bilinear_wrapper2(image_tiled, pixel_coords)
            resampled = tf.transpose(resampled, [1, 2, 0, 3])
        else:
            # Place the 6-view compositing on the last visible GPU.
            with tf.device('/gpu:' + str(
                    len(tf.config.experimental.list_physical_devices('GPU')) - 1)):
                image_one = tf.slice(image, [i, 0, 0, 0, 0],
                                     [1, 6, height, width, channels])
                resampled = tf.zeros([num_planes, height, width, channels])
                for face in range(6):
                    image_tiled = tf.squeeze(
                        tf.tile(
                            tf.slice(image_one, [0, face, 0, 0, 0],
                                     [1, 1, height, width, channels]),
                            [num_planes, 1, 1, 1, 1]))
                    pixel_coords_slice = tf.squeeze(
                        tf.slice(pixel_coords, [0, face, 0, 0, 0],
                                 [num_planes, 1, height, width, 2]))
                    # Zero out samples whose projected coordinates are invalid
                    # (non-positive sum) before accumulating over the 6 views.
                    valid = tf.cast(
                        tf.greater(
                            tf.reduce_sum(pixel_coords_slice, 3, keepdims=True),
                            0), tf.float32)
                    resampled_slice = sampling.bilinear_wrapper2(
                        image_tiled, pixel_coords_slice) * tf.tile(
                            valid, [1, 1, 1, channels])
                    resampled = resampled + resampled_slice

                resampled = tf.transpose(resampled, [1, 2, 0, 3])
        all_resampled.append(resampled)

    all_resampled = tf.stack(all_resampled)
    resampled = tf.reshape(
        all_resampled, [batch, height, width, channels * num_planes])
    return resampled
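
The pro2 branch composites the 6 resampled views with a coordinate-validity mask. A toy sketch of that masked accumulation on random data (TensorFlow 2; the shapes and validity rule follow the code above, everything else is made up):

import tensorflow as tf

num_planes, height, width, channels, n_views = 4, 8, 16, 3, 6

accum = tf.zeros([num_planes, height, width, channels])
for _ in range(n_views):
    # Stand-ins for one view's resampled image and its projected coordinates;
    # negative coordinate sums mark invalid samples.
    resampled_slice = tf.random.uniform([num_planes, height, width, channels])
    pixel_coords_slice = tf.random.uniform([num_planes, height, width, 2],
                                           minval=-1.0, maxval=1.0)
    valid = tf.cast(
        tf.greater(tf.reduce_sum(pixel_coords_slice, 3, keepdims=True), 0),
        tf.float32)
    accum += resampled_slice * tf.tile(valid, [1, 1, 1, channels])

print(accum.shape)  # (4, 8, 16, 3)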