Example #1
import numpy as np

import cam      # local helper module providing camera_intrinsics()
import fusion


def tsdf_voxel_volume(fx, fy, cx, cy):
    print("Estimating voxel volume bounds...")
    cam_intrinsics = cam.camera_intrinsics(fx, fy, cx, cy)
    vol_bnds = np.zeros((3, 2))
    depth_image = np.load(
        "/home/aditya/PycharmProjects/OpenCV-python/Project_2/TDCV-Project-2/trial1.npz"
    )['depth']
    camera_pose = np.identity(4)

    # Grow the (zero-initialized) volume bounds to enclose the camera view frustum
    view_frust_pts = fusion.get_view_frustum(depth_image, cam_intrinsics,
                                             camera_pose)
    vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0], np.amin(view_frust_pts,
                                                        axis=1))
    vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1], np.amax(view_frust_pts,
                                                        axis=1))

    print("Initializing voxel volume...")
    tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=0.01)

    # Integrate the single frame; the depth image is reused in place of a
    # color image, so the mesh "colors" are depth-derived
    tsdf_vol.integrate(depth_image,
                       depth_image,
                       cam_intrinsics,
                       camera_pose,
                       obs_weight=1.0)
    print("Saving mesh to mesh.ply...")
    verts, faces, norms, colors = tsdf_vol.get_mesh()
    fusion.meshwrite("mesh.ply", verts, faces, norms, colors)
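
Note: the cam.camera_intrinsics helper is not shown in this example. A minimal
sketch of what it plausibly returns, assuming it simply packs the pinhole
parameters into the standard 3x3 intrinsics matrix that fusion.get_view_frustum
expects (the helper body is a hypothetical reconstruction):

import numpy as np

def camera_intrinsics(fx, fy, cx, cy):
    # Assumed layout: K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
    return np.array([[fx, 0.0, cx],
                     [0.0, fy, cy],
                     [0.0, 0.0, 1.0]], dtype=np.float64)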
    """
Example #2
    #print "Cam poses shape",cam_poses.shape
    vol_bnds = np.zeros((3, 2))

    file = open('data_table/associate.txt')
    lines = file.read().split("\n")
    print "Number of lines in associate", len(lines)
    for i in range(len(lines) - 1):
        # Read depth image and camera pose
        depth_file = base_dir + '/' + lines[i].split(" ")[2]
        depth_im = cv2.imread(depth_file,
                              cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        depth_im = depth_im.astype(np.float)
        depth_im /= 5000.  # depth is saved in 16-bit PNG in millimeters
        cam_pose = cam_poses[4 * i:4 * (i + 1), :]

        view_frust_pts = fusion.get_view_frustum(depth_im, cam_intr, cam_pose)
        vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0],
                                    np.amin(view_frust_pts, axis=1))
        vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1],
                                    np.amax(view_frust_pts, axis=1))
        break  # only the first frame is used to estimate the bounds here
    print("Volume bounds:", vol_bnds)
    file.close()
    # ======================================================================================================== #

    # Integrate
    # ======================================================================================================== #
    # Initialize voxel volume
    print("Initializing voxel volume...")
    tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=0.005)
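
This fragment stops after initializing the volume and never calls integrate.
A minimal sketch of the fusion loop that would typically follow, assuming
associate.txt lists a color path in column 1 and a depth path in column 2
(the color column index is an assumption; only the depth column appears
above), and reusing the stacked cam_poses array:

    for i in range(len(lines) - 1):
        tokens = lines[i].split(" ")
        # Column 1 as the color path is assumed; column 2 matches the code above
        color_im = cv2.cvtColor(cv2.imread(base_dir + '/' + tokens[1]),
                                cv2.COLOR_BGR2RGB)
        depth_im = cv2.imread(base_dir + '/' + tokens[2],
                              cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        depth_im = depth_im.astype(float) / 5000.
        cam_pose = cam_poses[4 * i:4 * (i + 1), :]
        tsdf_vol.integrate(color_im, depth_im, cam_intr, cam_pose, obs_weight=1.)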
Example #3
import time

import cv2
import numpy as np

import fusion


def main(args):
    if args.example == 'cpp':
        print("Using PyTorch CPP.")
        from cpp.integrate import integrate
    elif args.example == 'jit':
        print("Using PyTorch JIT.")
        from jit.integrate import integrate
    elif args.example == 'py':
        print("Using vanilla PyTorch.")
        from python.integrate import integrate
    else:
        # Fail fast instead of silently leaving `integrate` undefined
        raise ValueError(
            "Unknown example '%s' (expected cpp, jit, or py)" % args.example)

    # ======================================================================================================== #
    # (Optional) This is an example of how to compute the 3D bounds
    # in world coordinates of the convex hull of all camera view
    # frustums in the dataset
    # ======================================================================================================== #
    print("Estimating voxel volume bounds...")
    n_imgs = 15
    cam_intr = np.loadtxt("data/camera-intrinsics.txt", delimiter=' ')
    vol_bnds = np.zeros((3, 2))
    for i in range(n_imgs):
        # Read depth image and camera pose
        depth_im = cv2.imread("data/frame-%06d.depth.png" % i, -1).astype(float)
        depth_im /= 1000.  # depth is saved in 16-bit PNG in millimeters
        depth_im[depth_im == 65.535] = 0  # set invalid depth to 0 (specific to 7-Scenes dataset)
        cam_pose = np.loadtxt("data/frame-%06d.pose.txt" % i)  # 4x4 rigid transformation matrix

        # Compute camera view frustum and extend convex hull
        view_frust_pts = fusion.get_view_frustum(depth_im, cam_intr, cam_pose)
        vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0],
                                    np.amin(view_frust_pts, axis=1))
        vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1],
                                    np.amax(view_frust_pts, axis=1))
    # ======================================================================================================== #

    # Initialize voxel volume
    print("Initializing voxel volume...")
    tsdf_vol = fusion.TSDFVolume(vol_bnds, 0.02, integrate)

    # ======================================================================================================== #
    # Integrate
    # ======================================================================================================== #
    # Loop through RGB-D images and fuse them together
    t0_elapse = time.time()
    times = []
    for i in range(n_imgs):
        print("Fusing frame %d/%d" % (i + 1, n_imgs))

        # Read RGB-D image and camera pose
        color_image = cv2.cvtColor(cv2.imread("data/frame-%06d.color.jpg" % i),
                                   cv2.COLOR_BGR2RGB)
        depth_im = cv2.imread("data/frame-%06d.depth.png" % i, -1).astype(float)
        depth_im /= 1000.
        depth_im[depth_im == 65.535] = 0
        cam_pose = np.loadtxt("data/frame-%06d.pose.txt" % i)

        # Integrate observation into voxel volume (assume color aligned with depth)
        tic = time.time()
        tsdf_vol.integrate(color_image,
                           depth_im,
                           cam_intr,
                           cam_pose,
                           obs_weight=1.)
        toc = time.time()
        times.append(toc - tic)

    fps = n_imgs / (time.time() - t0_elapse)
    print("Average FPS: {:.2f}".format(fps))

    times = [t * TIME_SCALES[args.scale] for t in times]
    print("Average integration time: {:.3f} {}".format(np.mean(times),
                                                       args.scale))

    # Extract pointcloud
    point_cloud = tsdf_vol.extract_point_cloud()
    fusion.pcwrite("pc.ply", point_cloud)

    # Get mesh from voxel volume and save to disk (can be viewed with Meshlab)
    print("Saving to mesh.ply...")
    verts, faces, norms, colors = tsdf_vol.extract_triangle_mesh()
    fusion.meshwrite("mesh.ply", verts, faces, norms, colors)
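
The fragment references TIME_SCALES and an args object without defining them.
A minimal sketch of a module-level entry point under which main could run; the
scale factors and flag names are assumptions inferred from args.example and
args.scale above:

import argparse

# Assumed conversion factors from seconds to the reporting scale
TIME_SCALES = {'s': 1.0, 'ms': 1000.0, 'us': 1000000.0}

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--example', choices=['cpp', 'jit', 'py'], default='py')
    parser.add_argument('--scale', choices=list(TIME_SCALES), default='ms')
    main(parser.parse_args())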