Example #1
def _main():

    np.set_printoptions(precision=2)

    # Defining (picking) point in camera frame
    point_in_camera_frame = np.array([81.2, 18.0, 594.6, 1])
    print(f"Point coordinates in camera frame: {point_in_camera_frame[0:3]}")

    eye_in_hand_transform_file = Path() / get_sample_data_path(
    ) / "EyeInHandTransform.yaml"
    robot_transform_file = Path() / get_sample_data_path(
    ) / "RobotTransform.yaml"
    # Checking if YAML files are valid
    _assert_valid_matrix(eye_in_hand_transform_file)
    _assert_valid_matrix(robot_transform_file)

    print(
        "Reading camera pose in end-effector frame (result of eye-in-hand calibration)"
    )
    transform_end_effector_to_camera = _read_transform(
        eye_in_hand_transform_file)

    print("Reading end-effector pose in robot base frame")
    transform_base_to_end_effector = _read_transform(robot_transform_file)

    print("Computing camera pose in robot base frame")
    transform_base_to_camera = np.matmul(transform_base_to_end_effector,
                                         transform_end_effector_to_camera)

    print("Transforming (picking) point from camera to robot base frame")
    point_in_base_frame = np.matmul(transform_base_to_camera,
                                    point_in_camera_frame)

    print(f"Point coordinates in robot base frame: {point_in_base_frame[0:3]}")
Example #2
def _main():
    app = zivid.Application()

    # The file_camera file is in Zivid Sample Data. See instructions in README.md
    file_camera = Path() / get_sample_data_path() / "FileCameraZividOne.zfc"

    print(f"Creating virtual camera using file: {file_camera}")
    camera = app.create_file_camera(file_camera)

    print("Configuring settings")
    settings = zivid.Settings()
    settings.acquisitions.append(zivid.Settings.Acquisition())
    settings.processing.filters.smoothing.gaussian.enabled = True
    settings.processing.filters.smoothing.gaussian.sigma = 1.5
    settings.processing.filters.reflection.removal.enabled = True
    settings.processing.filters.reflection.removal.experimental.mode = "global"
    settings.processing.color.balance.red = 1.0
    settings.processing.color.balance.green = 1.0
    settings.processing.color.balance.blue = 1.0

    print("Capturing frame")
    with camera.capture(settings) as frame:
        data_file = "Frame.zdf"
        print(f"Saving frame to file: {data_file}")
        frame.save(data_file)
Example #3
def _main():

    app = zivid.Application()

    data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
    print(f"Reading point cloud from file: {data_file}")
    frame = zivid.Frame(data_file)

    print("Getting point cloud from frame")
    point_cloud = frame.point_cloud()
    xyz = point_cloud.copy_data("xyz")
    rgba = point_cloud.copy_data("rgba")
    snr = point_cloud.copy_data("snr")

    height = point_cloud.height
    width = point_cloud.width

    print("Point cloud information:")
    print(f"Number of points: {height * width}")
    print(f"Height: {height}, Width: {width}")

    pixels_to_display = 10
    print(
        "Iterating over point cloud and extracting X, Y, Z, R, G, B, and SNR "
        f"for central {pixels_to_display} x {pixels_to_display} pixels")
    for i in range(int((height - pixels_to_display) / 2),
                   int((height + pixels_to_display) / 2)):
        for j in range(int((width - pixels_to_display) / 2),
                       int((width + pixels_to_display) / 2)):
            print(
                f"Values at pixel ({i} , {j}): X:{xyz[i,j,0]:.1f} Y:{xyz[i,j,1]:.1f}"
                f" Z:{xyz[i,j,2]:.1f} R:{rgba[i,j,0]} G:{rgba[i,j,1]} B:{rgba[i,j,2]}"
                f" SNR:{snr[i,j]:.1f}")
Example #4
def _main():

    app = zivid.Application()

    data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
    print(f"Reading ZDF frame from file: {data_file}")
    frame = zivid.Frame(data_file)

    point_cloud = frame.point_cloud()
    xyz = point_cloud.copy_data("xyz")
    rgba = point_cloud.copy_data("rgba")

    print(
        f"Before downsampling: {point_cloud.width * point_cloud.height} points"
    )

    _display_pointcloud(xyz, rgba[:, :, 0:3])

    print("Downsampling point cloud")
    point_cloud.downsample(zivid.PointCloud.Downsampling.by2x2)
    xyz_downsampled = point_cloud.copy_data("xyz")
    rgba_downsampled = point_cloud.copy_data("rgba")

    print(
        f"After downsampling: {point_cloud.width * point_cloud.height} points"
    )

    _display_pointcloud(xyz_downsampled, rgba_downsampled[:, :, 0:3])

    input("Press Enter to close...")
Example #5
def _main():

    with zivid.Application():

        data_file = Path() / get_sample_data_path(
        ) / "CalibrationBoardInCameraOrigin.zdf"
        print(f"Reading ZDF frame from file: {data_file}")
        frame = zivid.Frame(data_file)
        point_cloud = frame.point_cloud()

        print("Detecting checkerboard and estimating its pose in camera frame")
        transform_camera_to_checkerboard = (
            zivid.calibration.detect_feature_points(
                point_cloud).pose().to_matrix())  # pylint: disable=no-member
        print(
            f"Checkerboard pose in camera frame:\n{transform_camera_to_checkerboard}"
        )

        transform_file = "CameraToCheckerboardTransform.yaml"
        print(
            f"Saving detected checkerboard pose to YAML file: {transform_file}"
        )
        _write_transform(transform_camera_to_checkerboard, transform_file)

        print("Visualizing checkerboard with coordinate system")
        checkerboard_point_cloud = _create_open3d_point_cloud(point_cloud)
        _visualize_checkerboard_point_cloud_with_coordinate_system(
            checkerboard_point_cloud, transform_camera_to_checkerboard)
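The Open3D helpers used in this example are not shown either. Below is a minimal sketch of _create_open3d_point_cloud and _visualize_checkerboard_point_cloud_with_coordinate_system; the coordinate-frame size of 30 mm is an illustrative choice, and _write_transform would simply be the writing counterpart of the _read_transform sketch shown after Example #1.

import numpy as np
import open3d as o3d


def _create_open3d_point_cloud(point_cloud):
    """Convert a Zivid point cloud to an Open3D point cloud, dropping NaN points."""
    xyz = point_cloud.copy_data("xyz").reshape(-1, 3)
    rgb = point_cloud.copy_data("rgba")[:, :, 0:3].reshape(-1, 3) / 255.0
    valid = ~np.isnan(xyz[:, 2])

    point_cloud_open3d = o3d.geometry.PointCloud()
    point_cloud_open3d.points = o3d.utility.Vector3dVector(xyz[valid])
    point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb[valid])
    return point_cloud_open3d


def _visualize_checkerboard_point_cloud_with_coordinate_system(
        point_cloud_open3d, transform_camera_to_checkerboard):
    """Draw the point cloud with a coordinate frame placed at the checkerboard pose."""
    coordinate_system = o3d.geometry.TriangleMesh.create_coordinate_frame(size=30)
    coordinate_system.transform(transform_camera_to_checkerboard)
    o3d.visualization.draw_geometries([point_cloud_open3d, coordinate_system])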
Example #6
def _main():
    app = zivid.Application()

    print("Connecting to camera")
    camera = app.connect_camera()

    print("Loading settings from file")
    camera_model = camera.info.model
    settings_file = Path() / get_sample_data_path() / Path("Settings/" + camera_model[0:8] + "/Settings01.yml")
    settings = zivid.Settings.load(settings_file)

    print("Capturing frame")
    with camera.capture(settings) as frame:
        data_file = "Frame.zdf"
        print(f"Saving frame to file: {data_file}")
        frame.save(data_file)
Example #7
def _main():
    app = zivid.Application()

    print("Connecting to camera")
    camera = app.connect_camera()

    settings_file = Path() / get_sample_data_path(
    ) / "Settings/Zivid One/Settings01.yml"
    print(f"Configuring settings from file: {settings_file}")
    settings = get_settings_from_yaml(settings_file)

    print("Capturing frame")
    with camera.capture(settings) as frame:
        data_file = "Frame.zdf"
        print(f"Saving frame to file: {data_file}")
        frame.save(data_file)
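get_settings_from_yaml comes from the samples' utility module and is not shown here. With a recent zivid-python it can be a thin wrapper around zivid.Settings.load, which Examples #6 and #9 already call directly; the sketch below assumes that is sufficient.

from pathlib import Path

import zivid


def get_settings_from_yaml(settings_file: Path) -> zivid.Settings:
    """Load capture settings from a Zivid settings YAML file."""
    return zivid.Settings.load(settings_file)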
Example #8
def _main():
    app = zivid.Application()

    print("Connecting to camera")
    camera = app.connect_camera()

    for i in range(1, 4):
        settings_file = Path() / get_sample_data_path(
        ) / f"Settings/Zivid One/Settings0{i :01d}.yml"
        print(f"Configuring settings from file: {settings_file}")
        settings = get_settings_from_yaml(settings_file)

        print("Capturing frame (HDR)")
        with camera.capture(settings) as frame:
            data_file = f"Frame0{i}.zdf"
            print(f"Saving frame to file: {data_file}")
            frame.save(data_file)
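Example #8 captures three separate frames, one per settings file. For reference, a single HDR frame can also be captured by putting several acquisitions into one Settings object, as in the sketch below; the aperture values are illustrative only.

import zivid


def _capture_hdr_single_frame(camera: zivid.Camera) -> zivid.Frame:
    """Capture one HDR frame by combining several acquisitions in a single Settings object."""
    settings = zivid.Settings()
    for aperture in [11.31, 5.66, 2.83]:  # illustrative aperture values
        settings.acquisitions.append(zivid.Settings.Acquisition(aperture=aperture))
    return camera.capture(settings)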
Example #9
def _main():
    app = zivid.Application()

    print("Connecting to camera")
    camera = app.connect_camera()

    for i in range(1, 4):
        camera_model = camera.info.model
        settings_file = (
            Path() / get_sample_data_path() / Path("Settings/" + camera_model[0:8] + f"/Settings0{i :01d}.yml")
        )
        print(f"Loading settings from file: {settings_file}")
        settings = zivid.Settings.load(settings_file)

        print("Capturing frame (HDR)")
        with camera.capture(settings) as frame:
            data_file = f"Frame0{i}.zdf"
            print(f"Saving frame to file: {data_file}")
            frame.save(data_file)
Example #10
def _main():
    np.set_printoptions(precision=4, suppress=True)
    print_header(
        "This example shows conversions to/from Transformation Matrix")

    transformation_matrix = get_transformation_matrix_from_yaml(
        Path() / get_sample_data_path() / "RobotTransform.yaml")
    print(f"Transformation Matrix:\n{transformation_matrix}")

    # Extract Rotation Matrix and Translation Vector from Transformation Matrix
    print(f"Rotation Matrix:\n{transformation_matrix[:3,:3]}")
    print(f"Translation Vector:\n{transformation_matrix[:-1, -1]}")

    ###
    # Convert from Zivid to Robot (Transformation Matrix --> any format)
    ###
    print_header("Convert from Zivid (Rotation Matrix) to Robot")
    axis_angle = rotation_matrix_to_axis_angle(transformation_matrix[:3, :3])
    print(f"AxisAngle:\n{axis_angle.axis}, {axis_angle.angle:.4f}")
    rotation_vector = rotation_matrix_to_rotation_vector(
        transformation_matrix[:3, :3])
    print(f"Rotation Vector:\n{rotation_vector}")
    quaternion = rotation_matrix_to_quaternion(transformation_matrix[:3, :3])
    print(f"Quaternion:\n{quaternion}")
    rpy_list = rotation_matrix_to_roll_pitch_yaw(transformation_matrix[:3, :3])

    ###
    # Convert from Robot to Zivid (any format --> Rotation Matrix (part of Transformation Matrix))
    ###
    print_header("Convert from Robot to Zivid (Rotation Matrix)")
    rotation_matrix = axis_angle_to_rotation_matrix(axis_angle)
    print(f"Rotation Matrix from Axis Angle:\n{rotation_matrix}")
    rotation_matrix = rotation_vector_to_rotation_matrix(rotation_vector)
    print(f"Rotation Matrix from Rotation Vector:\n{rotation_matrix}")
    rotation_matrix = quaternion_to_rotation_matrix(quaternion)
    print(f"Rotation Matrix from Quaternion:\n{rotation_matrix}")
    roll_pitch_yaw_to_rotation_matrix(rpy_list)

    # Replace rotation matrix in transformation matrix
    transformation_matrix[:3, :3] = rotation_matrix
    # Save transformation matrix which has passed through quaternion representation
    save_transformation_matrix_to_yaml(transformation_matrix,
                                       "RobotTransformOut.yaml")
Example #11
def _main():

    with zivid.Application():

        data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
        print(f"Reading ZDF frame from file: {data_file}")
        frame = zivid.Frame(data_file)

        point_cloud = frame.point_cloud()
        xyz = point_cloud.copy_data("xyz")
        rgba = point_cloud.copy_data("rgba")

        print(
            f"Before downsampling: {point_cloud.width * point_cloud.height} points"
        )

        display_pointcloud(xyz, rgba[:, :, 0:3])

        print("Downsampling point cloud")
        print("This does not modify the current point cloud but returns")
        print("the downsampled point cloud as a new point cloud instance.")
        downsampled_point_cloud = point_cloud.downsampled(
            zivid.PointCloud.Downsampling.by2x2)

        print(
            f"After downsampling: {downsampled_point_cloud.width * downsampled_point_cloud.height} points"
        )

        print("Downsampling point cloud (in-place)")
        print("This modifies the current point cloud.")
        point_cloud.downsample(zivid.PointCloud.Downsampling.by2x2)

        xyz_downsampled = point_cloud.copy_data("xyz")
        rgba_downsampled = point_cloud.copy_data("rgba")

        print(
            f"After downsampling: {point_cloud.width * point_cloud.height} points"
        )

        display_pointcloud(xyz_downsampled, rgba_downsampled[:, :, 0:3])

        input("Press Enter to close...")
Example #12
def _main():

    with zivid.Application():

        filename_zdf = Path() / get_sample_data_path() / "Zivid3D.zdf"

        print(f"Reading {filename_zdf} point cloud")
        frame = zivid.Frame(filename_zdf)

        point_cloud = frame.point_cloud()
        xyz = point_cloud.copy_data("xyz")
        rgba = point_cloud.copy_data("rgba")

        display_rgb(rgba[:, :, 0:3])

        display_depthmap(xyz)

        display_pointcloud(xyz, rgba[:, :, 0:3])

        input("Press Enter to close...")
Example #13
def _main():
    app = zivid.Application()

    print("Connecting to camera")
    camera = app.connect_camera()

    print("Configuring settings from file")
    camera_model = camera.info.model
    settings_file = Path() / get_sample_data_path() / Path("Settings/" +
                                                           camera_model[0:8] +
                                                           "/Settings01.yml")
    settings = zivid.Settings.load(settings_file)

    print("Enabling diagnostics")
    settings.diagnostics.enabled = True

    print("Capturing frame")
    with camera.capture(settings) as frame:
        data_file = "FrameWithDiagnostics.zdf"
        print(f"Saving frame with diagnostic data to file: {data_file}")
        frame.save(data_file)
Example #14
def _main():

    with zivid.Application():

        data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
        print(f"Reading ZDF frame from file: {data_file}")
        frame = zivid.Frame(data_file)
        point_cloud = frame.point_cloud()

        print("Converting to BGR image in OpenCV format")
        bgr = _point_cloud_to_cv_bgr(point_cloud)

        bgr_image_file = "ImageRGB.png"
        print(f"Visualizing and saving BGR image to file: {bgr_image_file}")
        _visualize_and_save_image(bgr, bgr_image_file, "BGR image")

        print("Converting to Depth map in OpenCV format")
        z_color_map = _point_cloud_to_cv_z(point_cloud)

        depth_map_file = "DepthMap.png"
        print(f"Visualizing and saving Depth map to file: {depth_map_file}")
        _visualize_and_save_image(z_color_map, depth_map_file, "Depth map")
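_point_cloud_to_cv_bgr, _point_cloud_to_cv_z, and _visualize_and_save_image are helpers defined elsewhere in the sample. A sketch of the two converters follows; mapping Z through COLORMAP_VIRIDIS and painting missing data black are illustrative choices, and _visualize_and_save_image would amount to cv2.imshow plus cv2.imwrite.

import cv2
import numpy as np


def _point_cloud_to_cv_bgr(point_cloud):
    """Copy the color data and convert from RGBA to OpenCV's BGR order."""
    rgba = point_cloud.copy_data("rgba")
    return cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR)


def _point_cloud_to_cv_z(point_cloud):
    """Copy the Z channel and map it to a color image for display."""
    z = point_cloud.copy_data("xyz")[:, :, 2]
    z_min = np.nanmin(z)
    z_max = np.nanmax(z)
    z_scaled = np.clip((z - z_min) / (z_max - z_min) * 255.0, 0, 255)
    z_scaled[np.isnan(z)] = 0  # show missing data as black
    return cv2.applyColorMap(z_scaled.astype(np.uint8), cv2.COLORMAP_VIRIDIS)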
Example #15
def _main():

    app = zivid.Application()

    data_file = Path() / get_sample_data_path() / "Zivid3D.zdf"
    print(f"Reading ZDF frame from file: {data_file}")
    frame = zivid.Frame(data_file)

    point_cloud = frame.point_cloud()
    xyz = point_cloud.copy_data("xyz")
    rgba = point_cloud.copy_data("rgba")

    pixels_to_display = 300
    print(
        f"Generating binary mask of central {pixels_to_display} x {pixels_to_display} pixels"
    )
    mask = np.zeros((rgba.shape[0], rgba.shape[1]), bool)
    height = point_cloud.height
    width = point_cloud.width
    h_min = int((height - pixels_to_display) / 2)
    h_max = int((height + pixels_to_display) / 2)
    w_min = int((width - pixels_to_display) / 2)
    w_max = int((width + pixels_to_display) / 2)
    mask[h_min:h_max, w_min:w_max] = 1

    _display_rgb(rgba[:, :, 0:3], "RGB image")

    _display_depthmap(xyz)
    _display_pointcloud(xyz, rgba[:, :, 0:3])
    input("Press Enter to continue...")

    print("Masking point cloud")
    xyz_masked = xyz.copy()
    xyz_masked[mask == 0] = np.nan

    _display_depthmap(xyz_masked)
    _display_pointcloud(xyz_masked, rgba[:, :, 0:3])
    input("Press Enter to close...")
Example #16
def _main():

    np.set_printoptions(precision=2)

    while True:
        robot_camera_configuration = input(
            "Enter type of calibration, eth (for eye-to-hand) or eih (for eye-in-hand):"
        ).strip()

        if robot_camera_configuration.lower() == "eth":

            file_name = "ZividGemEyeToHand.zdf"

            # The (picking) point is defined as image coordinates in the camera frame. It is hard-coded
            # for ZividGemEyeToHand.zdf: pixel (1035, 255) -> X: 37.77, Y: -145.92, Z: 1227.1
            image_coordinate_x = 1035
            image_coordinate_y = 255

            eye_to_hand_transform_file = Path() / get_sample_data_path(
            ) / "EyeToHandTransform.yaml"
            # Checking if YAML files are valid
            _assert_valid_matrix(eye_to_hand_transform_file)

            print(
                "Reading camera pose in robot base frame (result of eye-to-hand calibration)"
            )
            transform_base_to_camera = _read_transform(
                eye_to_hand_transform_file)

            break

        if robot_camera_configuration.lower() == "eih":

            file_name = "ZividGemEyeInHand.zdf"

            # The (picking) point is defined as image coordinates in the camera frame. It is hard-coded
            # for ZividGemEyeInHand.zdf: pixel (1460, 755) -> X: 83.95, Y: 28.84, Z: 305.7
            image_coordinate_x = 1460
            image_coordinate_y = 755

            eye_in_hand_transform_file = Path() / get_sample_data_path(
            ) / "EyeInHandTransform.yaml"
            robot_transform_file = Path() / get_sample_data_path(
            ) / "RobotTransform.yaml"
            # Checking if YAML files are valid
            _assert_valid_matrix(eye_in_hand_transform_file)
            _assert_valid_matrix(robot_transform_file)

            print(
                "Reading camera pose in end-effector frame (result of eye-in-hand calibration)"
            )
            transform_end_effector_to_camera = _read_transform(
                eye_in_hand_transform_file)

            print("Reading end-effector pose in robot base frame")
            transform_base_to_end_effector = _read_transform(
                robot_transform_file)

            print("Computing camera pose in robot base frame")
            transform_base_to_camera = np.matmul(
                transform_base_to_end_effector,
                transform_end_effector_to_camera)

            break

        print("Entered unknown Hand-Eye calibration type")

    with zivid.Application():

        data_file = Path() / get_sample_data_path() / file_name
        print(f"Reading point cloud from file: {data_file}")
        frame = zivid.Frame(data_file)
        point_cloud = frame.point_cloud()

        while True:
            command = input(
                "Enter command, s (to transform single point) or p (to transform point cloud): "
            ).strip()

            if command.lower() == "s":

                print("Transforming single point")

                xyz = point_cloud.copy_data("xyz")

                point_in_camera_frame = np.array([
                    xyz[image_coordinate_y, image_coordinate_x, 0],
                    xyz[image_coordinate_y, image_coordinate_x, 1],
                    xyz[image_coordinate_y, image_coordinate_x, 2],
                    1,
                ])
                print(
                    f"Point coordinates in camera frame: {point_in_camera_frame[0:3]}"
                )

                print(
                    "Transforming (picking) point from camera to robot base frame"
                )
                point_in_base_frame = np.matmul(transform_base_to_camera,
                                                point_in_camera_frame)

                print(
                    f"Point coordinates in robot base frame: {point_in_base_frame[0:3]}"
                )

                break

            if command.lower() == "p":

                print("Transforming point cloud")

                point_cloud.transform(transform_base_to_camera)

                save_file = "ZividGemTransformed.zdf"
                print(f"Saving frame to file: {save_file}")
                frame.save(save_file)

                break

            print("Entered unknown command")