def main(imgL_BGR, disp, Q):

    # disparity range is tuned for the 'aloe' image pair
    min_disp = 0
    num_disp = 144
    print('generating 3d point cloud...')
    rgb = pptk.rand(100, 3)  # placeholder colours, replaced below by the left image
    points = cv2.reprojectImageTo3D(disp, Q)
    h, w, c = points.shape

    # Map each 3D point to the colour of the matching left-image pixel.
    colors = cv2.cvtColor(imgL_BGR, cv2.COLOR_BGR2RGB)
    points = np.reshape(points, (h * w, c))
    rgb = np.reshape(colors, (h * w, c))
    # Drop points that reprojectImageTo3D marked as invalid (infinite coordinates).
    finite = ~np.isinf(points).any(axis=1)
    rgb = rgb[finite]
    pts = points[finite]
    pts = pts / 1000.0  # scale down (e.g. millimetres to metres) for viewing
    v = pptk.viewer(pts, rgb)
    v.set(point_size=0.1)
    v.wait()

    # Alternative output path via write_ply (kept for reference):
    # mask = disp > disp.min()
    # out_points = points[mask]
    # out_colors = colors[mask]
    # out_fn = 'out.ply'
    # write_ply(out_fn, out_points, out_colors)
    # print('%s saved' % out_fn)

    # Optional preview of the input image and disparity:
    # cv2.imshow('left', imgL_BGR)
    # cv2.imshow('disparity', (disp - min_disp) / num_disp)
    # cv2.waitKey()
    # cv2.destroyAllWindows()

    # Keep only pixels with a valid disparity.
    mask = disp > disp.min()

    # Mask colors and points (points was flattened above, so flatten the mask too).
    output_points = points[mask.ravel()]
    output_colors = colors[mask]

    # Define name for output file
    output_file = 'reconstructed.ply'

    # Generate point cloud
    print("\nCreating the output file...\n")
    create_output(output_points, output_colors, output_file)
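
The function above relies on a create_output helper (and, in the commented-out path, write_ply) to write the coloured point cloud to disk, but neither is shown on this page. Below is a minimal sketch of such a writer, assuming an ASCII PLY layout of x, y, z floats followed by 8-bit red, green, blue per vertex; the original helper may differ.

import numpy as np

def create_output(vertices, colors, filename):
    # Write an (N, 3) float array of points and an (N, 3) uint8 array of
    # RGB colours to an ASCII PLY file. Minimal sketch, no error handling.
    vertices = np.asarray(vertices, dtype=np.float32).reshape(-1, 3)
    colors = np.asarray(colors, dtype=np.uint8).reshape(-1, 3)
    header = ("ply\n"
              "format ascii 1.0\n"
              "element vertex {}\n"
              "property float x\n"
              "property float y\n"
              "property float z\n"
              "property uchar red\n"
              "property uchar green\n"
              "property uchar blue\n"
              "end_header\n").format(len(vertices))
    with open(filename, 'w') as ply_file:
        ply_file.write(header)
        for (x, y, z), (r, g, b) in zip(vertices, colors):
            ply_file.write("{} {} {} {} {} {}\n".format(x, y, z, r, g, b))
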
Example #2
    def test_rand100(self):
        np.random.seed(0)
        x = pptk.rand(100, 3)

        pptk.estimate_normals(x, 5, 0.2, verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_eigenvalues=True,
                              verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_all_eigenvectors=True,
                              verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_eigenvalues=True,
                              output_all_eigenvectors=True,
                              verbose=False)

        x = np.float32(x)
        pptk.estimate_normals(x, 5, 0.2, verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_eigenvalues=True,
                              verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_all_eigenvectors=True,
                              verbose=False)
        pptk.estimate_normals(x,
                              5,
                              0.2,
                              output_eigenvalues=True,
                              output_all_eigenvectors=True,
                              verbose=False)
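
The test methods on this page take self, so they presumably come from a unittest.TestCase class in pptk's test suite. A minimal sketch of the surrounding harness (the class name below is an assumption, not pptk's):

import unittest
import pptk

class TestRand(unittest.TestCase):
    # The test methods shown on this page (test_rand100, test_rand,
    # test_centroids) would sit here, indented as class methods.

    def test_rand_shape(self):
        # pptk.rand(m, n) is expected to return an m-by-n array of points.
        self.assertEqual(pptk.rand(10, 3).shape, (10, 3))

if __name__ == '__main__':
    unittest.main()
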
Example #3
    def test_rand(self):
        P = pptk.rand(10, 3)
        self.assertTrue(type(P) == pptk.Points)
        self.assertTrue(P.dtype == 'float64')
        self.assertTrue(P.shape == (10, 3))
Example #4
    def test_centroids(self):
        np.random.seed(0)
        x = np.float32(pptk.rand(100, 3))
        n = x.NBHDS(k=3)
        y = np.vstack(pptk.MEAN(x[n], axis=0).evaluate())
        self.assertTrue(y.shape == (100, 3))
Example #5
def main():

    if (args.visualise3D):
        # Seed the viewer with a few random points so the window exists
        # before the first real point cloud is loaded.
        init_xyz = pptk.rand(10, 3)
        visualiser = pptk.viewer(init_xyz)

    try:
        #load calibration file
        cal_xml = args.calibration_folder + '/stereo_calibration.xml'
        fs = cv2.FileStorage(cal_xml, flags=cv2.FILE_STORAGE_READ)
        Q = fs.getNode("Q").mat()
        print("Q\n", Q)
        fs.release()

        #load camera images
        left_fns = glob.glob(args.input_folder + '/' + args.left_wildcard)
        right_fns = glob.glob(args.input_folder + '/' + args.right_wildcard)
        pose_fns = glob.glob(args.input_folder + '/' + args.pose_wildcard)

        #check the same number of left and right images exist
        if (not (len(left_fns) == len(right_fns))):
            raise ValueError(
                "Should have the same number of left and right images")

        if (args.pose_transformation):
            if (not (len(left_fns) == len(pose_fns))):
                raise ValueError(
                    "Should have the same number of image as pose files")

        i = 0
        while i < len(left_fns):
            left_fn = left_fns[i]
            right_fn = right_fns[i]
            if (args.pose_transformation):
                pose_fn = pose_fns[i]
                print(pose_fn)
            print(left_fn)
            print(right_fn)

            left_fn_basename = os.path.splitext(os.path.basename(left_fn))[0]
            print(left_fn_basename)

            print("reading images...")
            # Read left and right images from the file lists
            imgL = cv2.imread(left_fn, cv2.IMREAD_GRAYSCALE)
            imgR = cv2.imread(right_fn, cv2.IMREAD_GRAYSCALE)
            if imgL is None or imgR is None:
                raise IOError("Failed to read image pair: {} / {}".format(
                    left_fn, right_fn))

            # Ensure the images are unsigned 8-bit NumPy arrays
            arrL = np.uint8(imgL)
            arrR = np.uint8(imgR)

            print(arrL.shape)
            print(arrR.shape)

            print("stereo matching...")

            #generate disparity using stereo matching algorithms
            if algorithm == CV_MATCHER_BM:
                stereo = cv2.StereoBM_create(numDisparities=num_disp,
                                             blockSize=block_size)
                stereo.setMinDisparity(min_disp)
                stereo.setSpeckleWindowSize(speckle_window_size)
                stereo.setSpeckleRange(speckle_range)
                stereo.setUniquenessRatio(uniqness_ratio)
            elif algorithm == CV_MATCHER_SGBM:
                stereo = cv2.StereoSGBM_create(
                    minDisparity=min_disp,
                    numDisparities=num_disp,
                    blockSize=block_size,
                    P1=8 * 3 * window_size**2,
                    P2=32 * 3 * window_size**2,
                    disp12MaxDiff=1,
                    uniquenessRatio=uniqness_ratio,
                    speckleWindowSize=speckle_window_size,
                    speckleRange=speckle_range)
            else:
                raise ValueError(
                    "Unknown stereo matching algorithm: {}".format(algorithm))

            # StereoBM/StereoSGBM return fixed-point disparities scaled by 16.
            disp = stereo.compute(arrL, arrR).astype(np.float32) / 16.0

            print("generating 3D...")
            #reproject disparity to 3D
            points = cv2.reprojectImageTo3D(disp, Q)

            print("saving disparity maps...")

            # Scale disparity to roughly [0, 1] for display. Writing a float
            # image to PNG quantises it; the normalised 8-bit image saved below
            # is the one intended for viewing.
            disp = (disp - min_disp) / num_disp
            cv2.imwrite(
                args.output_folder_disparity +
                "/{}_disparity_map.png".format(left_fn_basename), disp)

            if (args.visualise_disparity):
                # Display disparity in a window
                plt.imshow(disp)
                plt.show(block=False)
                plt.pause(0.1)

            # Normalise disparity to an 8-bit image for saving
            disp_norm = cv2.normalize(disp,
                                      None,
                                      alpha=0,
                                      beta=255,
                                      norm_type=cv2.NORM_MINMAX,
                                      dtype=cv2.CV_8U)

            cv2.imwrite(
                args.output_folder_disparity +
                "/{}_disparity_image.png".format(left_fn_basename), disp_norm)

            # Format a colour image from the left camera for mapping onto the
            # point cloud. The left image was loaded as grayscale, so expand it
            # to three channels (COLOR_BGR2RGB would fail on a single-channel image).
            colors = cv2.cvtColor(arrL, cv2.COLOR_GRAY2RGB)
            mask = disp > disp.min()
            out_points = points[mask]
            out_colors = colors[mask]

            out_points_fn = args.output_folder_point_clouds + '/{}_point_cloud.ply'.format(
                left_fn_basename)
            out_points_transformed_fn = args.output_folder_point_clouds + '/{}_point_cloud_transformed.ply'.format(
                left_fn_basename)

            if (args.pose_transformation):
                print("transforming point cloud...")

                # Extract pose from the pose file
                with open(pose_fn, 'r') as pose_file:
                    line = pose_file.readline().rstrip()
                pose = line.split(',')
                if len(pose) != 7:
                    error_msg = ("Invalid number of values in pose data\n"
                                 "Should be in format [x,y,z,w,x,y,z] "
                                 "(translation x,y,z then quaternion w,x,y,z)")
                    raise ValueError(error_msg)
                pose_np = np.array([float(v) for v in pose])

                print("transformation:")
                print(pose_np)
                # Get translation and quaternion (stored as w,x,y,z; reordered to x,y,z,w)
                pose_t = np.array(
                    [float(pose[0]),
                     float(pose[1]),
                     float(pose[2])])
                pose_q = np.array([
                    float(pose[4]),
                    float(pose[5]),
                    float(pose[6]),
                    float(pose[3])
                ])
                pose_matrix = t_q_to_matrix(pose_t, pose_q)
                #print("transformation matrix:")
                #print(pose_matrix)

                transformed_points = transform_points(out_points, pose_matrix)

            if (args.visualise3D):
                #visualise point cloud
                visualise_points(visualiser, out_points, out_colors)

            if (args.pose_transformation):
                print("saving point clouds...")
                write_ply(out_points_transformed_fn, transformed_points,
                          out_colors)
            else:
                print("saving point cloud...")
            write_ply(out_points_fn, out_points, out_colors)

            i += 1

        if (args.visualise3D):
            visualiser.close()
        if (args.visualise_disparity):
            plt.close()

    except KeyboardInterrupt:
        if (args.visualise3D):
            visualiser.close()
        if (args.visualise_disparity):
            plt.close()
        raise
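
The main() above depends on module-level configuration (args from an argument parser, the CV_MATCHER_BM / CV_MATCHER_SGBM constants, and matcher parameters such as min_disp, num_disp, block_size, window_size, speckle_window_size, speckle_range and uniqness_ratio) as well as helpers t_q_to_matrix, transform_points, visualise_points and write_ply defined elsewhere in the script. As a sketch of what the two pose helpers might look like, assuming a unit quaternion passed in (x, y, z, w) order as built above (the script's actual implementations may differ):

import numpy as np

def t_q_to_matrix(t, q):
    # Build a 4x4 homogeneous transform from a translation t = (tx, ty, tz)
    # and a unit quaternion q = (x, y, z, w).
    x, y, z, w = q
    rotation = np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w),     2 * (x * z + y * w)],
        [2 * (x * y + z * w),     1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w),     2 * (y * z + x * w),     1 - 2 * (x * x + y * y)],
    ])
    matrix = np.eye(4)
    matrix[:3, :3] = rotation
    matrix[:3, 3] = t
    return matrix

def transform_points(points, matrix):
    # Apply a 4x4 homogeneous transform to an (N, 3) array of points.
    ones = np.ones((points.shape[0], 1), dtype=points.dtype)
    homogeneous = np.hstack([points, ones])
    return homogeneous.dot(matrix.T)[:, :3]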