Example #1
def _main():

    app = zivid.Application()

    filename_zdf = "Zivid3D.zdf"

    print(f"Reading {filename_zdf} point cloud")
    frame = zivid.Frame(Path(zivid.environment.data_path()) / filename_zdf)

    point_cloud = frame.get_point_cloud().to_array()
    xyz = np.dstack([point_cloud["x"], point_cloud["y"], point_cloud["z"]])
    rgb = np.dstack([point_cloud["r"], point_cloud["g"], point_cloud["b"]])

    pixels_to_display = 300
    print(
        f"Generating a binary mask of central {pixels_to_display} x {pixels_to_display} pixels"
    )
    mask = np.zeros((rgb.shape[0], rgb.shape[1]), bool)
    height = frame.get_point_cloud().height
    width = frame.get_point_cloud().width
    h_min = int((height - pixels_to_display) / 2)
    h_max = int((height + pixels_to_display) / 2)
    w_min = int((width - pixels_to_display) / 2)
    w_max = int((width + pixels_to_display) / 2)
    mask[h_min:h_max, w_min:w_max] = 1

    print("Masking the point cloud")
    xyz_masked = xyz.copy()
    xyz_masked[mask == 0] = np.nan

    _display_rgb(rgb)

    _display_depthmap(xyz)
    _display_pointcloud(rgb, xyz)
    input("Press Enter to continue...")

    _display_depthmap(xyz_masked)
    _display_pointcloud(rgb, xyz_masked)
    input("Press Enter to close...")
Example #2
def _main():

    with zivid.Application():

        data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
        print(f"Reading ZDF frame from file: {data_file}")
        frame = zivid.Frame(data_file)
        point_cloud = frame.point_cloud()

        print("Converting to BGR image in OpenCV format")
        bgr = _point_cloud_to_cv_bgr(point_cloud)

        bgr_image_file = "ImageRGB.png"
        print(f"Visualizing and saving BGR image to file: {bgr_image_file}")
        _visualize_and_save_image(bgr, bgr_image_file, "BGR image")

        print("Converting to Depth map in OpenCV format")
        z_color_map = _point_cloud_to_cv_z(point_cloud)

        depth_map_file = "DepthMap.png"
        print(f"Visualizing and saving Depth map to file: {depth_map_file}")
        _visualize_and_save_image(z_color_map, depth_map_file, "Depth map")
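The three helpers referenced here are not included in the snippet. A sketch under the assumption that they use OpenCV together with the same copy_data() API seen in the other examples:

import cv2
import numpy as np


def _point_cloud_to_cv_bgr(point_cloud):
    # Copy the colors out of the point cloud and reorder RGBA -> BGR
    rgba = point_cloud.copy_data("rgba")
    return cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)


def _point_cloud_to_cv_z(point_cloud):
    # Normalize the Z channel to 8 bit and apply a color map
    z = point_cloud.copy_data("xyz")[:, :, 2]
    z_scaled = (z - np.nanmin(z)) / (np.nanmax(z) - np.nanmin(z)) * 255.0
    z_scaled = np.nan_to_num(z_scaled).astype(np.uint8)  # NaN (missing data) -> 0
    return cv2.applyColorMap(z_scaled, cv2.COLORMAP_VIRIDIS)


def _visualize_and_save_image(image, image_file, title):
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.imwrite(image_file, image)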
Example #3

def _main():

    app = zivid.Application()

    data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
    print(f"Reading ZDF frame from file: {data_file}")
    frame = zivid.Frame(data_file)

    point_cloud = frame.point_cloud()
    xyz = point_cloud.copy_data("xyz")
    rgba = point_cloud.copy_data("rgba")

    pixels_to_display = 300
    print(
        f"Generating binary mask of central {pixels_to_display} x {pixels_to_display} pixels"
    )
    mask = np.zeros((rgba.shape[0], rgba.shape[1]), bool)
    height = frame.point_cloud().height
    width = frame.point_cloud().width
    h_min = int((height - pixels_to_display) / 2)
    h_max = int((height + pixels_to_display) / 2)
    w_min = int((width - pixels_to_display) / 2)
    w_max = int((width + pixels_to_display) / 2)
    mask[h_min:h_max, w_min:w_max] = 1

    _display_rgb(rgba[:, :, 0:3], "RGB image")

    _display_depthmap(xyz)
    _display_pointcloud(xyz, rgba[:, :, 0:3])
    input("Press Enter to continue...")

    print("Masking point cloud")
    xyz_masked = xyz.copy()
    xyz_masked[mask == 0] = np.nan

    _display_depthmap(xyz_masked)
    _display_pointcloud(xyz_masked, rgba[:, :, 0:3])
    input("Press Enter to close...")
Example #4
def frame_fixture(application, sample_data_file):  # pylint: disable=unused-argument
    import zivid

    with zivid.Frame(sample_data_file) as frame:
        yield frame
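This is a pytest yield fixture; in the source repository it would carry a @pytest.fixture decorator that was dropped in this extract. A usage sketch, assuming the application and sample_data_file fixtures are defined elsewhere in the test suite:

import pytest


@pytest.fixture(name="frame")
def frame_fixture(application, sample_data_file):  # pylint: disable=unused-argument
    import zivid

    with zivid.Frame(sample_data_file) as frame:
        yield frame


def test_point_cloud_shape(frame):
    # pytest injects the opened frame; the fixture closes it afterwards
    point_cloud = frame.point_cloud()
    assert point_cloud.height > 0 and point_cloud.width > 0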
Example #5
def perform_hand_eye_calibration(mode: str, data_dir: Path):
    """ Perform had-eye calibration based on mode

    Args:
        mode: Calibration mode, eye-in-hand or eye-to-hand
        data_dir: Path to dataset

    Returns:
        4x4 transformation matrix
        List of residuals

    Raises:
        RuntimeError: If no feature points are detected
        ValueError: If calibration mode is invalid
    """
    # Set up the Zivid application
    app = zivid.Application()

    calibration_inputs = []
    idata = 1
    while True:
        frame_file = data_dir / f"img{idata:02d}.zdf"
        pose_file = data_dir / f"pos{idata:02d}.yaml"

        if frame_file.is_file() and pose_file.is_file():

            print(f"Detect feature points from img{idata:02d}.zdf")
            point_cloud = zivid.Frame(frame_file).get_point_cloud()
            detected_features = zivid.hand_eye.detect_feature_points(point_cloud)

            if not detected_features:
                raise RuntimeError(
                    f"Failed to detect feature points from frame {frame_file}"
                )

            print(f"Read robot pose from pos{idata:02d}.yaml")
            with open(pose_file) as file:
                pose = pose_from_datastring(file.read())

            detection_result = zivid.hand_eye.CalibrationInput(pose, detected_features)
            calibration_inputs.append(detection_result)
        else:
            break

        idata += 1

    print(f"\nPerform {mode} calibration")

    if mode == "eye-in-hand":
        calibration_result = zivid.hand_eye.calibrate_eye_in_hand(calibration_inputs)
    elif mode == "eye-to-hand":
        calibration_result = zivid.hand_eye.calibrate_eye_to_hand(calibration_inputs)
    else:
        raise ValueError(f"Invalid calibration mode: {mode}")

    transform = calibration_result.hand_eye_transform
    residuals = calibration_result.per_pose_calibration_residuals

    print("\n\nTransform: \n")
    np.set_printoptions(precision=5, suppress=True)
    print(transform)

    print("\n\nResiduals: \n")
    for res in residuals:
        print(f"Rotation: {res.rotation:.6f}   Translation: {res.translation:.6f}")

    return transform, residuals
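pose_from_datastring() is not shown above. A minimal sketch, assuming the YAML pose files store a flattened 4x4 row-major matrix in an OpenCV-style "data: [ ... ]" field, and that the zivid.hand_eye module used in this example provides a Pose type accepting a 4x4 NumPy array:

import numpy as np
import zivid


def pose_from_datastring(datastring: str):
    # Pull the comma-separated values out of the "data: [ ... ]" field
    values = datastring.split("data:")[-1].strip().strip("[]")
    pose_matrix = np.array([float(v) for v in values.split(",")]).reshape((4, 4))
    return zivid.hand_eye.Pose(pose_matrix)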
Example #6
def main():
    output = r"C:\Users\eivin\Desktop\NTNU master-PUMA-2019-2021\3.studhalvår\TPK4560-Specalization-Project\projector-calibration\generated_pattern\chessboard.svg"
    columns = 10
    rows = 7
    p_type = "checkerboard"
    units = "px"
    square_size = 90
    page_size = "CUSTOM"
    # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height]
    page_sizes = {"CUSTOM": [1024, 768], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297],
                  "A5": [148, 210]}
    page_width = page_sizes[page_size.upper()][0]
    page_height = page_sizes[page_size.upper()][1]
    pm = PatternMaker(columns, rows, output, units, square_size, page_width, page_height)
    
    fx  = 2767.193359
    fy  = 2766.449119
    ppx = 942.942505
    ppy = 633.898
    
    app = zivid.Application()

    intrinsics = np.array([[fx, 0, ppx],
                           [0, fy, ppy],
                           [0, 0, 1]])

    distcoeffszivid = np.array([[-0.26513, 0.282772, 0.000767328, 0.000367037, 0]])

    # Defining the dimensions of the checkerboard, columns by rows
    CHECKERBOARD = (9, 6)

    # OpenCV optimizes the camera calibration using the Levenberg-Marquardt
    # algorithm (simply put, a combination of Gauss-Newton and gradient descent).
    # This defines the stopping criteria.
    # The first parameter states that we limit the algorithm both in terms of
    # desired accuracy and number of iterations.
    # Here we limit it to 30 iterations or an accuracy of 0.001.
    # The same criteria are also used by the sub-pixel estimator.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    P_p_2d = []  # Saved 2D coordinates of the i-th corner in the projector image space. These coordinates are imported from the script gen_pattern.py
    P_c_2d = []  # 2D coordinates of the i-th corner in the camera image space, found by OpenCV findChessboardCorners.
    P_3d   = []  # 3D coordinates obtained by mapping the 2D image-space coordinates into the Zivid depth camera's view space.
                 # These are queried using the Zivid camera intrinsics to map from pixel coordinates to points in space.
    P_3d_all = []
    P_3d_obj = []

    nan_pixels = 0
    method = 'corner'
    # Extracting path of individual image stored in a given directory
    images = glob.glob('captured_images/*.png')
    zdf    = glob.glob('zdf/*.zdf')

    for fname in range(len(images)):
        img = cv2.imread(images[fname])  # Read the image stored in the captured_images folder
        gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

        
        frame = zivid.Frame(zdf[fname])
        point_cloud = frame.get_point_cloud().to_array()
        xyz = np.dstack([point_cloud["x"], point_cloud["y"], point_cloud["z"]])

        #x_indx,y_indx,z_indx = np.meshgrid(np.arange(0, 1920),np.arange(0,1200), np.arange(0,3))
        
        #array_masked = np.ma.masked_invalid(xyz)
        #valid_xs = x_indx[~array_masked.mask]
        #valid_ys = y_indx[~array_masked.mask]
        #valid_zs = z_indx[~array_masked.mask]
        #validarr = array_masked[~array_masked.mask]
        
        #xyz_interp = griddata((valid_xs, valid_ys, valid_zs), validarr.ravel(),
        #                            (x_indx, y_indx, z_indx), method='nearest')
    
        # Find the chess board corners
        # If desired number of corners are found in the image then ret = true
        ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
        if not ret:
            print(f"Chessboard corners not found in frame {fname}")
        """
        If desired number of corner are detected,
        we refine the pixel coordinates and display 
        them on the images of checker board
        """
        if ret == True:
            P_3d = []
            
            corners2 = cv2.cornerSubPix(gray, corners, (11,11),(-1,-1), criteria)
            if method == 'corner':
                corners2 = find_center_of_4_cordinates(corners2, 6, 9)
                corners2 = np.reshape(corners2,(40,1,2))
                P_p_2d.append(find_center_of_4_cordinates(pm.save_2d_projector_corner_cordinates(), 6, 9))
            else:
                P_p_2d.append(pm.save_2d_projector_corner_cordinates())
            P_c_2d.append(corners2)
            #print("Cordinates found in camera fram from findchessboardcorners: \n",corners2[0:10])
            for i in range(len(corners2)):
                if np.isnan(xyz[int(corners2[i][0][1])][int(corners2[i][0][0])]).any():
                    nan_pixels += 1
                    P_3d.append(interpolatexyz(xyz,int(corners2[i][0][0]),int(corners2[i][0][1]), 100))
                else:
                    P_3d.append(xyz[int(corners2[i][0][1])][int(corners2[i][0][0])])
                #P_3d.append(xyz_interp[int(corners2[i][0][1])][int(corners2[i][0][0])])
            P_3d_all.append(P_3d)
            c_3 = np.identity(3) - (1 / 3) * np.ones((3, 3))
            P_3d_t = np.transpose(P_3d)
            centroid = np.average(P_3d_t, axis=1)
            P_3d_c = (P_3d_t.T - centroid).T
            u, s, vh = np.linalg.svd(P_3d_c)
            P_3d_rotate_t = np.dot(np.transpose(u), P_3d_c)
            #P_3d_rotate_t = np.transpose(P_3d_rotate)
            P_3d_obj.append(P_3d_rotate_t.T)
            
            # Draw and display the corners
            #img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)

            #cv2.imshow(images[fname],img)
            #plt.imshow(img)
            #plt.show()
            #cv2.waitKey(0)
            #print('Cordinates in frame ', fname, ':\n File location \n ', images[fname], '\n', zdf[fname])
            # The line below is the part the author asked for help with: a
            # list comprehension over the three lists to print matching
            # values (see the sketch after this example)
            table_header = ('```\n'
                            'Projector image coordinates     Camera image coordinates    3D coordinates\n'
                            '```')

            #cordinates = "\n".join("{} {} {}".format(x, y, z) for x, y, z in zip(P_p_2d[fname], corners2, P_3d))
            #print(str)
            #print(cordinates)  
                        
            # visualizepointcloud(P_3d, zdf[fname] + " 3D Coordinates")
            # visualizepointcloud(P_3d_c.T, zdf[fname] + " 3D Coordinates centered")
            # visualizepointcloud(P_3d_rotate_t.T, zdf[fname] + " 3D Coordinates rotated")
        
        
    cv2.destroyAllWindows()

    P_3d_obj_plan = P_3d_obj
    P_3d_obj_plan = np.asarray(P_3d_obj_plan)
    P_3d_obj = np.asarray(P_3d_obj)
    P_3d_obj[:,:,2] = 0
    P_3d_obj = P_3d_obj.astype('float32') 
    P_p_2d = np.asarray(P_p_2d)
    P_c_2d = np.asarray(P_c_2d)
    P_p_2d = P_p_2d.astype('float32')
    P_c_2d = P_c_2d.astype('float32')
    #P_c_2d = np.reshape(P_c_2d, (len(P_c_2d),len(P_c_2d[0]),len(P_c_2d[0][0])))
    size = (1024, 768)
    # ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(P_3d_obj, P_p_2d, size, None, 4, None, None, cv2.CALIB_ZERO_TANGENT_DIST, criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 2e-16))
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(P_3d_obj, P_p_2d, size, None, None)

    #retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(P_3d_obj, P_p_2d, P_c_2d, mtx, dist, intrinsics, distcoeffszivid, size)
    retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(P_3d_obj, P_c_2d, P_p_2d, intrinsics, distcoeffszivid, mtx, dist, size)
    print("Projector instrinsic matrix : \n")
    print(mtx)
    print("Projector disortion coeffsients : \n")
    print(dist)
    #print("rvecs : \n")
    #print(rvecs)
    #print("tvecs : \n")
    #print(tvecs)

    P_p_2d = np.reshape(P_p_2d, (len(P_p_2d),len(P_p_2d[0]),1,len(P_p_2d[0][0])))
    mean_error = 0
    Re_projection_frames = []
    for i in range(len(P_3d_obj)):
        imgpoints2, _ = cv2.projectPoints(P_3d_obj[i], rvecs[i], tvecs[i], mtx, dist)
        error = cv2.norm(P_p_2d[i], imgpoints2, cv2.NORM_L2)/len(imgpoints2)
        print(images[i],"error number :", error, "\n")
        mean_error += error
        Re_projection_frames.append([i+1,error])

    Re_projection_frames = np.asarray(Re_projection_frames)
    Re_projection_error_over_mean = []
    for i in range(len(Re_projection_frames)):
        if Re_projection_frames[i][1] > (mean_error / len(P_3d_obj)):
            Re_projection_error_over_mean.append([images[i], Re_projection_frames[i][1]])


    planar_deviation = P_3d_obj_plan[:,:,2]
    mean_frames = []
    for i in range(0, len(planar_deviation)):
        mean_frame = np.mean(np.abs(planar_deviation[i]))
        mean_frames.append([i+1, mean_frame])
    mean_frames = np.asarray(mean_frames)
    totmean_dev = np.mean(np.abs(planar_deviation))

    print( "\nTotal error Re-projection Error: {}".format(mean_error/len(P_3d_obj)) )
    print("Number of nan pixels cordinates in the depth map:", nan_pixels)

    T_C_P = np.column_stack((R,T))
    print("\nTransformation matrix from camera to projector frame:\n")
    print(T_C_P)

    fig, ax = plt.subplots(figsize=(8,8))
    fig.patch.set_visible(False)
    ax.scatter(Re_projection_frames[:,0] , Re_projection_frames[:,1], s=5.0, color = "b", label = "Frames")
    ax.grid(True)
    
    ax.plot(Re_projection_frames[:, 0], np.full(len(P_3d_obj), mean_error / len(P_3d_obj)), color="r", label="Mean = {} px".format(np.round(mean_error / len(P_3d_obj), 3)), linestyle="--")  # straight reference line at the mean
    ax.set(title='Re-Projection Error {}'.format(method) , ylabel= "Re-Projection Error (px)",  xlabel='Frames')
    legend = ax.legend(loc=(0.7,-0.14), shadow=True, fontsize='large')

    # Put a nicer background color on the legend.
    legend.get_frame().set_facecolor('#afeeee')

    fig.savefig('Re-Projection-{}.png'.format(method))

    fig, ax = plt.subplots(figsize=(8,8))
    fig.patch.set_visible(False)  # Maybe not needed
    ax.scatter(mean_frames[:,0] , mean_frames[:,1], s=5.0, color = "b", label = "Mean Frame")
    ax.grid(True)
    #y_mean_e = [mean_error/len(P_3d_obj), mean_error/len(P_3d_obj)]
    ax.plot(mean_frames[:, 0], np.full(len(mean_frames), totmean_dev), color="r", label="Total Mean = \u00B1 {} mm".format(np.round(totmean_dev, 2)), linestyle="--")  # straight reference line at the mean
    ax.set(title='Plane Tolerance Deviation {}'.format(method) , ylabel= u"\u00B1 Deviation (mm)",  xlabel='Frames')
    legend = ax.legend(loc=(0.7,-0.14), shadow=True, fontsize='large')

    # Put a nicer background color on the legend.
    legend.get_frame().set_facecolor('#afeeee')

    fig.savefig('Planar-Deviation-{}.png'.format(method))

    point_cloud = open3d.geometry.PointCloud()
    point_cloud.points = open3d.utility.Vector3dVector(P_3d)
    c_frame = open3d.geometry.TriangleMesh.create_coordinate_frame(size=200, origin=[0, 0, 0])
    p_frame = copy.deepcopy(c_frame)
    p_frame.rotate(R, center = (0,0,0))
    p_frame = copy.deepcopy(p_frame).translate((T[0][0], T[1][0], T[2][0]))
    open3d.visualization.draw_geometries([point_cloud ,c_frame, p_frame])
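The "part I need help with" comment inside the corner loop asks how to print the three per-frame coordinate lists side by side. One way, sketched with the names from the loop (P_p_2d[-1] is the projector-corner list appended for the current frame; the helper name is hypothetical):

def format_coordinate_table(projector_2d, camera_2d, points_3d):
    # Zip the three per-corner lists and render one row per corner
    header = ("Projector image coordinates    "
              "Camera image coordinates    3D coordinates")
    rows = "\n".join(
        f"{p}    {c[0]}    {q}"
        for p, c, q in zip(projector_2d, camera_2d, points_3d)
    )
    return f"{header}\n{rows}"

# Inside the loop, after P_3d is filled:
# print(format_coordinate_table(P_p_2d[-1], corners2, P_3d))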
Example #7
def _main():

    np.set_printoptions(precision=2)

    while True:
        robot_camera_configuration = input(
            "Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand):"
        ).strip()

        if robot_camera_configuration.lower() == "eth":

            file_name = "ZividGemEyeToHand.zdf"

            # The (picking) point is defined as image coordinates in camera frame. It is hard-coded for the
            # ZividGemEyeToHand.zdf (1035,255) X: 37.77 Y: -145.92 Z: 1227.1
            image_coordinate_x = 1035
            image_coordinate_y = 255

            eye_to_hand_transform_file = Path() / get_sample_data_path() / "EyeToHandTransform.yaml"
            # Checking if YAML files are valid
            _assert_valid_matrix(eye_to_hand_transform_file)

            print(
                "Reading camera pose in robot base frame (result of eye-to-hand calibration)"
            )
            transform_base_to_camera = _read_transform(eye_to_hand_transform_file)

            break

        if robot_camera_configuration.lower() == "eih":

            file_name = "ZividGemEyeInHand.zdf"

            # The (picking) point is defined as image coordinates in camera frame. It is hard-coded for the
            # ZividGemEyeInHand.zdf (1460,755) X: 83.95 Y: 28.84 Z: 305.7
            image_coordinate_x = 1460
            image_coordinate_y = 755

            eye_in_hand_transform_file = Path() / get_sample_data_path() / "EyeInHandTransform.yaml"
            robot_transform_file = Path() / get_sample_data_path() / "RobotTransform.yaml"
            # Checking if YAML files are valid
            _assert_valid_matrix(eye_in_hand_transform_file)
            _assert_valid_matrix(robot_transform_file)

            print(
                "Reading camera pose in end-effector frame (result of eye-in-hand calibration)"
            )
            transform_end_effector_to_camera = _read_transform(eye_in_hand_transform_file)

            print("Reading end-effector pose in robot base frame")
            transform_base_to_end_effector = _read_transform(robot_transform_file)

            print("Computing camera pose in robot base frame")
            transform_base_to_camera = np.matmul(transform_base_to_end_effector, transform_end_effector_to_camera)

            break

        print("Entered unknown Hand-Eye calibration type")

    with zivid.Application():

        data_file = Path() / get_sample_data_path() / file_name
        print(f"Reading point cloud from file: {data_file}")
        frame = zivid.Frame(data_file)
        point_cloud = frame.point_cloud()

        while True:
            command = input(
                "Enter command, s (to transform single point) or p (to transform point cloud): "
            ).strip()

            if command.lower() == "s":

                print("Transforming single point")

                xyz = point_cloud.copy_data("xyz")

                point_in_camera_frame = np.array([
                    xyz[image_coordinate_y, image_coordinate_x, 0],
                    xyz[image_coordinate_y, image_coordinate_x, 1],
                    xyz[image_coordinate_y, image_coordinate_x, 2],
                    1,
                ])
                print(
                    f"Point coordinates in camera frame: {point_in_camera_frame[0:3]}"
                )

                print(
                    "Transforming (picking) point from camera to robot base frame"
                )
                point_in_base_frame = np.matmul(transform_base_to_camera, point_in_camera_frame)

                print(
                    f"Point coordinates in robot base frame: {point_in_base_frame[0:3]}"
                )

                break

            if command.lower() == "p":

                print("Transforming point cloud")

                point_cloud.transform(transform_base_to_camera)

                save_file = "ZividGemTransformed.zdf"
                print(f"Saving frame to file: {save_file}")
                frame.save(save_file)

                break

            print("Entered unknown command")
Example #8
def handeye_eth_frames_fixture(application):
    path = _testdata_dir() / "handeye" / "eth"
    frames = [
        zivid.Frame(file_path) for file_path in sorted(path.glob("*.zdf"))
    ]
    yield frames
Example #9
def _main() -> None:

    # Get args
    parser = argparse.ArgumentParser(description="Capture turntable data")
    parser.add_argument("label", type=str, help="label for dataset")
    parser.add_argument(
        "--eq-hist",
        action="store_true",
        help="equalize histogram when detecting markers",
    )
    args = parser.parse_args()
    print(args)

    # Load frames from directory
    label = args.label
    datadir = Path(".") / make_casedir_name(label)
    print(f"Loading frames from {datadir}")
    _ = zivid.Application()
    frames = [zivid.Frame(filepath) for filepath in datadir.glob("*.zdf")]
    print(f"Loaded {len(frames)} frames")

    # Get transforms
    print("Detecting markers and calculating transforms for each frame")
    transforms = get_transforms(frames, equalize_hist=args.eq_hist)

    # Convert to Open3D
    print("Converting to Open3D PointCloud")
    point_cloud_originals = [
        frame_to_open3d_pointcloud(frame) for frame in frames
    ]

    # Preprocessing
    print("Filtering based on normals")
    point_clouds = [
        remove_divergent_normals(pcd, threshold=0.6)
        for pcd in point_cloud_originals
    ]

    print("Adjusting colors based on normals")
    point_clouds = [adjust_colors_from_normals(pcd) for pcd in point_clouds]

    # Transform to same coordinate system
    print("Applying transforms")
    for pcd, transform in zip(point_clouds, transforms):
        print(transform)
        pcd.transform(transform)

    # Floor removal
    print("Removing floor from every point cloud")
    point_clouds = [
        remove_by_z_threshold(pcd, z_threshold=5.0) for pcd in point_clouds
    ]

    # Outlier removal
    print("Removing outliers from every point cloud")
    point_clouds = [clean_outlier_blobs(pcd) for pcd in point_clouds]

    # Stitching
    print("Stitching/combining point clouds")
    pcd = stitch(point_clouds)
    outfile_pre_downsample = datadir / "pre_downsample.ply"
    print(f"Saving to {outfile_pre_downsample}")
    o3d.io.write_point_cloud(str(outfile_pre_downsample), pcd)

    # Downsampling
    print("Downsampling")
    pcd = pcd.voxel_down_sample(voxel_size=0.25)
    pcd.estimate_normals()

    # Save to file
    outfile_post_downsample = datadir / "post_downsample.ply"
    print(f"Saving to {outfile_post_downsample}")
    o3d.io.write_point_cloud(str(outfile_post_downsample), pcd)

    # Visualize
    o3d.visualization.draw_geometries([pcd])
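remove_by_z_threshold() is one of several helpers this pipeline relies on but does not show. A sketch under the assumption that the marker transforms place the turntable plane at z = 0, so the floor can be cut with a simple height threshold:

import numpy as np
import open3d as o3d


def remove_by_z_threshold(pcd, z_threshold):
    # Keep only points above the given height; assumes the cloud is already
    # transformed so that z increases away from the turntable surface
    points = np.asarray(pcd.points)
    keep_indices = np.nonzero(points[:, 2] > z_threshold)[0]
    return pcd.select_by_index(keep_indices)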
Example #10
def Load_Zdf_Frames(self, fname):
    app = zivid.Application()
    frame = zivid.Frame(self.zdf[fname])
    point_cloud = frame.get_point_cloud().to_array()
    xyz = np.dstack([point_cloud["x"], point_cloud["y"], point_cloud["z"]])
    return xyz
Example #11

def _main():

    while True:
        robot_camera_configuration = input(
            "Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand):"
        ).strip()
        if robot_camera_configuration.lower() == "eth" or robot_camera_configuration.lower() == "eih":
            break
        print("Entered unknown Hand-Eye calibration type")

    args = _args()
    path = args.input_path

    list_of_paths_to_hand_eye_dataset_point_clouds = _path_list_creator(path, "img", 2, ".zdf")

    list_of_paths_to_hand_eye_dataset_robot_poses = _path_list_creator(path, "pos", 2, ".yaml")

    if len(list_of_paths_to_hand_eye_dataset_robot_poses) != len(list_of_paths_to_hand_eye_dataset_point_clouds):
        raise Exception("The number of point clouds (ZDF iles) and robot poses (YAML files) must be the same")

    if len(list_of_paths_to_hand_eye_dataset_robot_poses) == 0:
        raise Exception("There are no robot poses (YAML files) in the data folder")

    if len(list_of_paths_to_hand_eye_dataset_point_clouds) == 0:
        raise Exception("There are no point clouds (ZDF files) in the data folder")

    if len(list_of_paths_to_hand_eye_dataset_robot_poses) == len(list_of_paths_to_hand_eye_dataset_point_clouds):

        with zivid.Application():

            hand_eye_transform = _read_transform(path / "transformation.yaml")

            number_of_dataset_pairs = len(list_of_paths_to_hand_eye_dataset_point_clouds)

            list_of_open_3d_point_clouds = []
            for data_pair_id in range(number_of_dataset_pairs):

                # Updating the user about the process status through the terminal
                print(f"{data_pair_id} / {number_of_dataset_pairs} - {100*data_pair_id / number_of_dataset_pairs}%")

                # Reading point cloud from file
                point_cloud = zivid.Frame(list_of_paths_to_hand_eye_dataset_point_clouds[data_pair_id]).point_cloud()

                robot_pose = _read_transform(list_of_paths_to_hand_eye_dataset_robot_poses[data_pair_id])

                # Transforms point cloud to the robot end-effector frame
                if robot_camera_configuration.lower() == "eth":
                    inv_robot_pose = np.linalg.inv(robot_pose)
                    point_cloud_transformed = point_cloud.transform(np.matmul(inv_robot_pose, hand_eye_transform))

                # Transforms point cloud to the robot base frame
                if robot_camera_configuration.lower() == "eih":
                    point_cloud_transformed = point_cloud.transform(np.matmul(robot_pose, hand_eye_transform))

                xyz = point_cloud_transformed.copy_data("xyz")
                rgba = point_cloud_transformed.copy_data("rgba")

                # Finding Cartesian coordinates of the checkerboard center point
                detection_result = zivid.calibration.detect_feature_points(point_cloud_transformed)

                if detection_result.valid():
                    # Extracting the points within the ROI (checkerboard)
                    xyz_filtered = _filter_checkerboard_roi(xyz, detection_result.centroid())

                    # Converting from NumPy array to Open3D format
                    point_cloud_open3d = _create_open3d_point_cloud(rgba, xyz_filtered)

                    # Saving point cloud to PLY file
                    o3d.io.write_point_cloud(f"img{data_pair_id + 1}.ply", point_cloud_open3d)

                    # Appending the Open3D point cloud to a list for visualization
                    list_of_open_3d_point_clouds.append(point_cloud_open3d)

    print(f"{number_of_dataset_pairs} / {number_of_dataset_pairs} - 100.0%")
    print("\nAll done!\n")

    if data_pair_id > 1:
        print("Visualizing transformed point clouds\n")
        o3d.visualization.draw_geometries(list_of_open_3d_point_clouds)  # pylint: disable=no-member
    else:
        raise Exception("Not enought data!")
Example #12
def load_zdf(file: str):
    with zivid.Application():
        frame = zivid.Frame(file)
        point_cloud = frame.get_point_cloud().to_array()
    return point_cloud
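get_point_cloud().to_array() is the pre-2.0 zivid-python API. The same function written against the point_cloud()/copy_data() API used by the newer examples above would look roughly like this:

import zivid


def load_zdf(file: str):
    with zivid.Application():
        frame = zivid.Frame(file)
        # copy_data("xyz") returns an (H, W, 3) float array of XYZ values
        return frame.point_cloud().copy_data("xyz")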