Example 1
def test_destagger_type_good(meta, dtype) -> None:
    """Destaggering must not change the dtype of the input array."""
    rows = meta.format.pixels_per_column
    cols = meta.format.columns_per_frame

    # check both a single-field (H, W) and a stacked (H, W, 2) array
    for shape in [(rows, cols), (rows, cols, 2)]:
        assert client.destagger(meta, np.zeros(shape, dtype)).dtype == dtype
Example 2
def plot_all_channels(hostname: str,
                      lidar_port: int = 7502,
                      n_scans: int = 5) -> None:
    """Display all channels of n consecutive lidar scans taken live from sensor

    Args:
        hostname: hostname of the sensor
        lidar_port: UDP port to listen on for lidar data
        n_scans: number of scans to show
    """
    import matplotlib.pyplot as plt  # type: ignore

    # [doc-stag-display-all-2d]
    # take sample of n scans from sensor
    metadata, sample = client.Scans.sample(hostname, n_scans, lidar_port)

    # initialize and configure subplots
    fig, axarr = plt.subplots(n_scans,
                              4,
                              sharex=True,
                              sharey=True,
                              figsize=(12.0, n_scans * .75),
                              tight_layout=True)
    fig.suptitle("{} consecutive scans from {}".format(n_scans, hostname))
    # fix: Figure.canvas.set_window_title() was deprecated in matplotlib 3.4
    # and removed in 3.6 -- the window title lives on the canvas manager now
    fig.canvas.manager.set_window_title("example: display_all_2D")

    # set row and column titles of subplots
    column_titles = ["range", "reflectivity", "near_ir", "signal"]
    row_titles = ["Scan {}".format(i) for i in range(n_scans)]
    for ax, column_title in zip(axarr[0], column_titles):
        ax.set_title(column_title)
    for ax, row_title in zip(axarr[:, 0], row_titles):
        ax.set_ylabel(row_title)

    # plot 2D scans
    for count, scan in enumerate(next(sample)):
        axarr[count, 0].imshow(
            client.destagger(metadata, scan.field(client.ChanField.RANGE)))
        axarr[count, 1].imshow(
            client.destagger(metadata,
                             scan.field(client.ChanField.REFLECTIVITY)))
        axarr[count, 2].imshow(
            client.destagger(metadata, scan.field(client.ChanField.NEAR_IR)))
        axarr[count, 3].imshow(
            client.destagger(metadata, scan.field(client.ChanField.SIGNAL)))
    # [doc-etag-display-all-2d]

    # configure and show plot; use a plain loop rather than list
    # comprehensions executed purely for side effects
    for ax in axarr.ravel():
        ax.get_xaxis().set_visible(False)
        ax.set_yticks([])
        ax.set_yticklabels([])
    plt.show()
Example 3
def test_destagger_inverse(meta) -> None:
    """Stagger and destagger must round-trip in either order."""
    rows = meta.format.pixels_per_column
    cols = meta.format.columns_per_frame
    original = np.arange(rows * cols).reshape((rows, cols))

    # stagger first, then destagger
    staggered = client.destagger(meta, original, inverse=True)
    assert np.array_equal(original, client.destagger(meta, staggered))

    # destagger first, then re-stagger
    destaggered = client.destagger(meta, original)
    assert np.array_equal(
        original, client.destagger(meta, destaggered, inverse=True))
Example 4
def pcap_show_one_scan(source: client.PacketSource,
                       metadata: client.SensorInfo,
                       num: int = 0,
                       destagger: bool = True) -> None:
    """Plot all channels of one scan in 2D using matplotlib.

    Args:
        source: packet source to read scans from
        metadata: sensor metadata associated with the source
        num: index of the scan to display
        destagger: whether to destagger fields before display
    """
    import matplotlib.pyplot as plt  # type: ignore

    scan = nth(client.Scans(source), num)
    if not scan:
        # fix: error message previously read "in not present"
        print(f"ERROR: Scan # {num} is not present in pcap file")
        exit(1)

    # [doc-stag-pcap-show-one]
    fig = plt.figure(constrained_layout=True)
    axs = fig.subplots(len(client.ChanField), 1, sharey=True)

    for ax, field in zip(axs, client.ChanField):
        img = normalize(scan.field(field))
        if destagger:
            img = client.destagger(metadata, img)

        ax.set_title(str(field), fontdict={'fontsize': 10})
        ax.imshow(img, cmap='gray', resample=False)
        ax.set_yticklabels([])
        ax.set_yticks([])
        ax.set_xticks([0, scan.w])
    plt.show()
Example 5
def live_plot_signal(hostname: str, lidar_port: int = 7502) -> None:
    """Display signal from live sensor

    Args:
        hostname: hostname of the sensor
        lidar_port: UDP port to listen on for lidar data

    """
    import cv2  # type: ignore

    print("press ESC from visualization to exit")

    # [doc-stag-live-plot-signal]
    # establish sensor connection; closing() guarantees the stream is
    # released even if the loop exits via an exception
    with closing(client.Scans.stream(hostname, lidar_port,
                                     complete=False)) as stream:
        show = True
        while show:
            for scan in stream:
                # uncomment if you'd like to see frame id printed
                # print("frame id: {} ".format(scan.frame_id))
                signal = client.destagger(stream.metadata,
                                          scan.field(client.ChanField.SIGNAL))
                # scale to the full 8-bit range for display
                # NOTE(review): divides by np.max(signal) -- would misbehave
                # on an all-zero signal field; confirm that cannot occur here
                signal = (signal / np.max(signal) * 255).astype(np.uint8)
                cv2.imshow("scaled signal", signal)
                key = cv2.waitKey(1) & 0xFF
                # [doc-etag-live-plot-signal]
                # 27 is esc
                if key == 27:
                    show = False
                    break
        cv2.destroyAllWindows()
Example 6
def plot_range_image(hostname: str, lidar_port: int = 7502) -> None:
    """Display range data taken live from sensor as an image

    Args:
        hostname: hostname of the sensor
        lidar_port: UDP port to listen on for lidar data
    """
    import matplotlib.pyplot as plt  # type: ignore

    # get single scan [doc-stag-single-scan]
    metadata, sample = client.Scans.sample(hostname, 1, lidar_port)
    scan = next(sample)[0]
    # [doc-etag-single-scan]

    # initialize plot
    fig, ax = plt.subplots()
    # fix: Figure.canvas.set_window_title() was removed in matplotlib 3.6;
    # the window title is set via the canvas manager now
    fig.canvas.manager.set_window_title("example: plot_range_image")

    # plot using imshow (renamed local: `range` shadowed the builtin)
    range_field = scan.field(client.ChanField.RANGE)
    plt.imshow(client.destagger(metadata, range_field), resample=False)

    # configure and show plot
    plt.title("Range Data from {}".format(hostname))
    plt.axis('off')
    plt.show()
Example 7
def filter_3d_by_range_and_azimuth(hostname: str,
                                   lidar_port: int = 7502,
                                   range_min: int = 2) -> None:
    """Easily filter 3D Point Cloud by Range and Azimuth Using the 2D Representation

    Args:
        hostname: hostname of sensor
        lidar_port: UDP port to listen on for lidar data
        range_min: range minimum in meters
    """
    import matplotlib.pyplot as plt  # type: ignore
    import math

    # set up a cubic 3D plot region centered on the sensor
    plt.figure()
    ax = plt.axes(projection='3d')
    plot_extent = 3
    ax.set_xlim3d([-plot_extent, plot_extent])
    ax.set_ylim3d([-plot_extent, plot_extent])
    ax.set_zlim3d([-plot_extent, plot_extent])

    plt.title("Filtered 3D Points from {}".format(hostname))

    # sample two scans and use the second one
    metadata, sample = client.Scans.sample(hostname, 2, lidar_port)
    scan = next(sample)[1]

    # [doc-stag-filter-3d]
    # obtain destaggered range
    range_destaggered = client.destagger(metadata,
                                         scan.field(client.ChanField.RANGE))

    # obtain destaggered xyz representation
    xyzlut = client.XYZLut(metadata)
    xyz_destaggered = client.destagger(metadata, xyzlut(scan))

    # select only points with more than min range using the range data
    xyz_filtered = xyz_destaggered * (range_destaggered[:, :, np.newaxis] >
                                      (range_min * 1000))

    # get first 3/4 of scan
    to_col = math.floor(metadata.mode.cols * 3 / 4)
    xyz_filtered = xyz_filtered[:, 0:to_col, :]
    # [doc-etag-filter-3d]

    # flatten each coordinate plane and scatter, coloring by height
    x, y, z = (coord.flatten() for coord in np.dsplit(xyz_filtered, 3))
    ax.scatter(x, y, z, c=z / max(z), s=0.2)
    plt.show()
Example 8
def test_destagger_xyz(meta, scan) -> None:
    """Destaggering the xyz projection keeps its (H, W, 3) shape."""
    rows = meta.format.pixels_per_column
    cols = meta.format.columns_per_frame

    lut = client.XYZLut(meta)
    destaggered = client.destagger(meta, lut(scan))

    assert destaggered.shape == (rows, cols, 3)
Example 9
def test_destagger_correct(meta, scan) -> None:
    """Compare client destagger function to reference implementation."""
    range_field = scan.field(client.ChanField.RANGE)

    # destaggered range field from the reference implementation
    expected = reference.destagger(meta.format.pixel_shift_by_row, range_field)

    # destaggered range field from the client implementation
    actual = client.destagger(meta, range_field)

    assert np.array_equal(expected, actual)
Example 10
def test_destagger_correct_multi(meta, scan) -> None:
    """Compare client destagger function to reference on stacked fields."""
    depth = 5
    near_ir = scan.field(client.ChanField.NEAR_IR)
    stacked = np.repeat(near_ir[..., None], depth, axis=2)

    # stack the reference result the same way as the input
    single_ref = reference.destagger(meta.format.pixel_shift_by_row, near_ir)
    expected = np.repeat(single_ref[..., None], depth, axis=2)

    actual = client.destagger(meta, stacked)

    assert stacked.dtype == np.uint32
    assert actual.dtype == np.uint32
    assert np.array_equal(expected, actual)
Example 11
def pcap_2d_viewer(source: client.PacketSource,
                   metadata: client.SensorInfo,
                   num: int = 0) -> None:
    """Visualize channel fields in 2D using opencv.

    Args:
        source: packet source to read scans from
        metadata: sensor metadata associated with the source
        num: unused; immediately reset and used as a frame counter
    """
    import cv2  # type: ignore

    # [doc-stag-pcap-display-live]
    print("press ESC from visualization to exit")

    should_quit = False  # renamed: `quit` shadowed the builtin
    paused = False
    destagger = True
    num = 0
    for scan in client.Scans(source):
        print("frame id: {}, num = {}".format(scan.frame_id, num))

        fields = [scan.field(ch) for ch in client.ChanField]
        if destagger:
            fields = [client.destagger(metadata, f) for f in fields]

        # stack all channel images vertically with a 2px border
        combined_images = np.vstack(
            [np.pad(normalize(f), 2, constant_values=1.0) for f in fields])

        cv2.imshow("4 channels: ", combined_images)

        # handle keys presses
        while True:
            key = cv2.waitKey(1) & 0xFF
            # 100 is d
            if key == 100:
                destagger = not destagger
            # 32 is SPACE
            if key == 32:
                paused = not paused
            # 27 is ESC
            elif key == 27:
                should_quit = True
                # bug fix: previously, pressing ESC while paused left the
                # viewer stuck in this loop until it was unpaused again
                break

            if not paused:
                break
            time.sleep(0.1)

        if should_quit:
            break
        num += 1

    cv2.destroyAllWindows()
Example 12
    def update_data(vis: o3d.visualization.Visualizer):
        """Refresh the Open3D point cloud and 2D image from the current scan.

        Closure over the enclosing scope: reads ``scan``, ``xyzlut``,
        ``fields``, ``field_ind``, ``aes``, ``metadata`` and mutates
        ``cloud`` and ``image``.
        """
        # project the range field that pairs with the selected channel to xyz
        xyz = xyzlut(scan.field(range_for_field(fields[field_ind])))
        key = scan.field(fields[field_ind]).astype(float)

        # apply colormap to field values
        # NOTE(review): aes[field_ind] appears to rescale `key` in place
        # before colorize() is applied -- confirm against its definition
        aes[field_ind](key)
        color_img = colorize(key)

        # prepare point cloud for Open3d Visualiser
        cloud.points = o3d.utility.Vector3dVector(xyz.reshape((-1, 3)))
        cloud.colors = o3d.utility.Vector3dVector(color_img.reshape((-1, 3)))

        # prepare canvas for 2d image (grayscale: same key in all 3 channels)
        gray_img = np.dstack([key] * 3)
        canvas_set_image_data(image, client.destagger(metadata, gray_img))

        # signal that the point cloud needs to be re-rendered
        vis.update_geometry(cloud)
Example 13
def pcap_2d_viewer(
        pcap_path: str,
        metadata_path: str,
        num: int = 0,  # not used in this example
        rate: float = 0.0) -> None:
    """Simple sensor field visualization pipeline as 2D images from pcap file
    (*pcap_path*)

    Interactive keys: ``d`` toggles destaggering, ``SPACE`` pauses playback,
    ``ESC`` exits.

    Args:
        pcap_path: path to the pcap file
        metadata_path: path to the .json with metadata (aka :class:`.SensorInfo`)
        num: unused in this example (immediately reset to 0 below)
        rate: read speed of packets from the pcap file (**1.0** - corresponds to
              real-time by packets timestamp, **0.0** - as fast as it reads from
              file without any delay)

    """
    import cv2  # type: ignore

    # [doc-stag-pcap-display-live]
    metadata = read_metadata(metadata_path)

    source = pcap.Pcap(pcap_path, metadata, rate=rate)

    # closing() guarantees the pcap handle is released when done
    with closing(source) as source:
        scans = iter(client.Scans(source))

        print("press ESC from visualization to exit")

        channels = [
            client.ChanField.RANGE, client.ChanField.SIGNAL,
            client.ChanField.NEAR_IR, client.ChanField.REFLECTIVITY
        ]

        paused = False
        destagger = True
        num = 0
        scan = next(scans, None)
        while scan:
            print("frame id: {}, num = {}".format(scan.frame_id, num))

            fields_values = [scan.field(ch) for ch in channels]

            # destaggering yields the natural 2D "image" pixel ordering
            if destagger:
                fields_values = [
                    client.destagger(metadata, field_val)
                    for field_val in fields_values
                ]

            # `ae` is defined elsewhere in this module; presumably an
            # auto-exposure/normalization helper for display -- TODO confirm
            fields_images = [ae(field_val) for field_val in fields_values]

            # stack the 4 channel images vertically with a 2px border
            combined_images = np.vstack(
                [np.pad(img, 2, constant_values=1.0) for img in fields_images])

            cv2.imshow("4 channels: ", combined_images)

            key = cv2.waitKey(1) & 0xFF
            # 100 is d
            if key == 100:
                destagger = not destagger
            # 32 is SPACE
            if key == 32:
                paused = not paused
            # 27 is ESC
            elif key == 27:
                break

            # only advance to the next scan when not paused; while paused the
            # loop keeps polling the keyboard on the current frame
            if not paused:
                scan = next(scans, None)
                num += 1

        cv2.destroyAllWindows()
Example 14
def pcap_to_csv(pcap_path: str,
                metadata_path: str,
                num: int = 0,
                csv_dir: str = ".",
                csv_prefix: str = "pcap_out",
                csv_ext: str = "csv") -> None:
    """Write scans from pcap file (*pcap_path*) to plain csv files (one per
    lidar scan).

    If the *csv_ext* ends in ``.gz``, the file is automatically saved in
    compressed gzip format. :func:`.numpy.loadtxt` can be used to read gzipped
    files transparently back to :class:`.numpy.ndarray`.

    Number of saved lines per csv file is always [H x W], which corresponds
    to a full 2D image representation of a lidar scan.

    Each line in a csv file is:

        RANGE (mm), SIGNAL, NEAR_IR, REFLECTIVITY, X (m), Y (m), Z (m)

    Args:
        pcap_path: path to the pcap file
        metadata_path: path to the .json with metadata (aka :class:`.SensorInfo`)
        num: number of scans to save from pcap to csv files
        csv_dir: path to the directory where csv files will be saved
        csv_prefix: the filename prefix that will be appended with frame number
                    and *csv_ext*
        csv_ext: file extension to use. If it ends with ``.gz`` the output is
                 gzip compressed

    """
    from itertools import islice

    # ensure that base csv_dir exists
    if not os.path.exists(csv_dir):
        os.makedirs(csv_dir)

    metadata = read_metadata(metadata_path)

    # [doc-stag-pcap-to-csv]
    field_names = 'RANGE (mm), SIGNAL, NEAR_IR, REFLECTIVITY, X (m), Y (m), Z (m)'
    field_fmts = ['%d', '%d', '%d', '%d', '%.8f', '%.8f', '%.8f']

    channels = [
        client.ChanField.RANGE, client.ChanField.SIGNAL,
        client.ChanField.NEAR_IR, client.ChanField.REFLECTIVITY
    ]

    # bug fix: the pcap was previously opened twice and the first handle was
    # never closed; open it exactly once, managed by closing()
    with closing(pcap.Pcap(pcap_path, metadata)) as source:

        # precompute xyzlut to save computation in a loop
        xyzlut = client.XYZLut(metadata)

        # create an iterator of LidarScans from pcap and bound it if num is specified
        scans = iter(client.Scans(source))
        if num:
            scans = islice(scans, num)

        for idx, scan in enumerate(scans):

            fields_values = [scan.field(ch) for ch in channels]
            xyz = xyzlut(scan)

            # get lidar data as one frame of [H x W x 7], "fat" 2D image
            frame = np.dstack((*fields_values, xyz))
            frame = client.destagger(metadata, frame)

            csv_path = os.path.join(csv_dir,
                                    f'{csv_prefix}_{idx:06d}.{csv_ext}')

            header = '\n'.join([
                f'pcap file: {pcap_path}', f'frame num: {idx}',
                f'metadata file: {metadata_path}', field_names
            ])

            print(f'write frame #{idx}, to file: {csv_path}')

            np.savetxt(csv_path,
                       np.reshape(frame, (-1, frame.shape[2])),
                       fmt=field_fmts,
                       delimiter=',',
                       header=header)
Example 15
def main():
    """PointViz visualizer examples."""

    parser = argparse.ArgumentParser(
        description=main.__doc__,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('pcap_path',
                        nargs='?',
                        metavar='PCAP',
                        help='path to pcap file')
    parser.add_argument('meta_path',
                        nargs='?',
                        metavar='METADATA',
                        help='path to metadata json')

    args = parser.parse_args()

    # environment variables, if set, take precedence over positional args
    pcap_path = os.getenv("SAMPLE_DATA_PCAP_PATH", args.pcap_path)
    meta_path = os.getenv("SAMPLE_DATA_JSON_PATH", args.meta_path)

    if not pcap_path or not meta_path:
        print("ERROR: Please add SAMPLE_DATA_PCAP_PATH and SAMPLE_DATA_JSON_PATH to" +
            " environment variables or pass <pcap_path> and <meta_path>")
        # bug fix: exit with a non-zero status on a usage error (previously
        # sys.exit() reported success to the calling shell)
        sys.exit(1)

    print(f"Using:\n\tjson: {meta_path}\n\tpcap: {pcap_path}")

    # Getting data sources
    meta = client.SensorInfo(open(meta_path).read())
    packets = pcap.Pcap(pcap_path, meta)
    scans = iter(client.Scans(packets))

    # ==============================
    print("Ex 0: Empty Point Viz")

    # [doc-stag-empty-pointviz]
    # Creating a point viz instance
    point_viz = viz.PointViz("Example Viz")
    viz.add_default_controls(point_viz)

    # ... add objects here

    # update internal objects buffers and run visualizer
    point_viz.update()
    point_viz.run()
    # [doc-etag-empty-pointviz]


    # =========================================================================
    print("Ex 1.0:\tImages and Labels: the Image object and 2D Image set_position() - height-normalized screen coordinates")

    label_top = viz.Label("[0, 1]", 0.5, 0.0, align_top=True)
    label_top.set_scale(2)
    point_viz.add(label_top)

    label_bot = viz.Label("[0, -1]", 0.5, 1, align_top=False)
    label_bot.set_scale(2)
    point_viz.add(label_bot)

    # [doc-stag-image-pos-center]
    img = viz.Image()
    img.set_image(np.full((10, 10), 0.5))
    img.set_position(-0.5, 0.5, -0.5, 0.5)
    point_viz.add(img)
    # [doc-etag-image-pos-center]

    # visualize
    point_viz.update()
    point_viz.run()

    # =========================================================================
    print("Ex 1.1:\tImages and Labels: Window-aligned images with 2D Image set_hshift() - width-normalized [-1, 1] horizontal shift")

    # [doc-stag-image-pos-left]
    # move img to the left
    img.set_position(0, 1, -0.5, 0.5)
    img.set_hshift(-1)
    # [doc-etag-image-pos-left]

    # visualize
    point_viz.update()
    point_viz.run()

    # [doc-stag-image-pos-right]
    # move img to the right
    img.set_position(-1, 0, -0.5, 0.5)
    img.set_hshift(1)
    # [doc-etag-image-pos-right]

    # visualize
    point_viz.update()
    point_viz.run()

    # [doc-stag-image-pos-right-bottom]
    # move img to the right bottom
    img.set_position(-1, 0, -1, 0)
    img.set_hshift(1)
    # [doc-etag-image-pos-right-bottom]

    # visualize
    point_viz.update()
    point_viz.run()


    # cleanup: removed stale commented-out call referencing an undefined
    # `label_mid` object
    remove_objs(point_viz, [label_top, label_bot, img])

    # =======================================
    print("Ex 1.2:\tImages and Labels: Lidar Scan Fields as Images")

    # [doc-stag-scan-fields-images]
    scan = next(scans)

    # aspect ratio of the field-of-view as a fraction of a full circle
    img_aspect = (meta.beam_altitude_angles[0] -
                  meta.beam_altitude_angles[-1]) / 360.0
    img_screen_height = 0.4 # [0..2]
    img_screen_len = img_screen_height / img_aspect

    # prepare field data
    ranges = scan.field(client.ChanField.RANGE)
    ranges = client.destagger(meta, ranges)
    ranges = np.divide(ranges, np.amax(ranges), dtype=np.float32)

    signal = scan.field(client.ChanField.SIGNAL)
    signal = client.destagger(meta, signal)
    signal = np.divide(signal, np.amax(signal), dtype=np.float32)

    # creating Image viz elements
    range_img = viz.Image()
    range_img.set_image(ranges)
    # top center position
    range_img.set_position(-img_screen_len / 2, img_screen_len / 2,
                           1 - img_screen_height, 1)
    point_viz.add(range_img)

    signal_img = viz.Image()
    signal_img.set_image(signal)
    # bottom center position (reuses the img_aspect/img_screen_len values
    # computed above; a redundant recomputation was removed here)
    signal_img.set_position(-img_screen_len / 2, img_screen_len / 2, -1,
                            -1 + img_screen_height)
    point_viz.add(signal_img)
    # [doc-etag-scan-fields-images]

    # visualize
    point_viz.update()
    point_viz.run()

    print("Ex 1.3:\tImages and Labels: Adding labels")

    # [doc-stag-scan-fields-images-labels]
    range_label = viz.Label(str(client.ChanField.RANGE), 0.5, 0, align_top=True)
    range_label.set_scale(1)
    point_viz.add(range_label)

    signal_label = viz.Label(str(client.ChanField.SIGNAL),
                            0.5, 1 - img_screen_height / 2,
                            align_top=True)
    signal_label.set_scale(1)
    point_viz.add(signal_label)
    # [doc-etag-scan-fields-images-labels]

    # visualize
    point_viz.update()
    point_viz.run()

    # ===============================================================
    print("Ex 2.0:\tPoint Clouds: As Structured Points")

    # [doc-stag-scan-structured]
    cloud_scan = viz.Cloud(meta)
    cloud_scan.set_range(scan.field(client.ChanField.RANGE))
    cloud_scan.set_key(signal)
    point_viz.add(cloud_scan)
    # [doc-etag-scan-structured]

    # visualize
    point_viz.update()
    point_viz.run()

    remove_objs(point_viz, [cloud_scan])

    # ===============================================================
    print("Ex 2.1:\tPoint Clouds: As Unstructured Points")

    # [doc-stag-scan-unstructured]
    # transform scan data to 3d points
    xyzlut = client.XYZLut(meta)
    xyz = xyzlut(scan.field(client.ChanField.RANGE))

    cloud_xyz = viz.Cloud(xyz.shape[0] * xyz.shape[1])
    cloud_xyz.set_xyz(np.reshape(xyz, (-1, 3)))
    cloud_xyz.set_key(signal.ravel())
    point_viz.add(cloud_xyz)
    # [doc-etag-scan-unstructured]

    point_viz.camera.dolly(150)

    # visualize
    point_viz.update()
    point_viz.run()

    # =======================================================
    print("Ex 2.2:\tPoint Clouds: Custom Axes Helper as Unstructured Points")

    # [doc-stag-axes-helper]
    # basis vectors
    x_ = np.array([1, 0, 0]).reshape((-1, 1))
    y_ = np.array([0, 1, 0]).reshape((-1, 1))
    z_ = np.array([0, 0, 1]).reshape((-1, 1))

    axis_n = 100
    line = np.linspace(0, 1, axis_n).reshape((1, -1))

    # basis vector to point cloud
    axis_points = np.hstack((x_ @ line, y_ @ line, z_ @ line)).transpose()

    # colors for basis vectors
    axis_color_mask = np.vstack((
        np.full((axis_n, 4), [1, 0.1, 0.1, 1]),
        np.full((axis_n, 4), [0.1, 1, 0.1, 1]),
        np.full((axis_n, 4), [0.1, 0.1, 1, 1])))

    cloud_axis = viz.Cloud(axis_points.shape[0])
    cloud_axis.set_xyz(axis_points)
    cloud_axis.set_key(np.full(axis_points.shape[0], 0.5))
    cloud_axis.set_mask(axis_color_mask)
    cloud_axis.set_point_size(3)
    point_viz.add(cloud_axis)
    # [doc-etag-axes-helper]

    point_viz.camera.dolly(50)

    # visualize
    point_viz.update()
    point_viz.run()

    remove_objs(point_viz, [
        range_img, range_label, signal_img, signal_label, cloud_axis, cloud_xyz
    ])

    # ===============================================================
    print("Ex 2.3:\tPoint Clouds: the LidarScanViz class")

    # [doc-stag-lidar-scan-viz]
    # Creating LidarScan visualizer (3D point cloud + field images on top)
    ls_viz = viz.LidarScanViz(meta, point_viz)

    # adding scan to the lidar scan viz
    ls_viz.scan = scan

    # refresh viz data
    ls_viz.draw()

    # visualize
    # update() is not needed for LidarScanViz because it's doing it internally
    point_viz.run()
    # [doc-etag-lidar-scan-viz]

    # ===================================================
    print("Ex 3.0:\tAugmenting point clouds with 3D Labels")

    # [doc-stag-lidar-scan-viz-labels]
    # Adding 3D Labels
    label1 = viz.Label("Label1: [1, 2, 4]", 1, 2, 4)
    point_viz.add(label1)

    label2 = viz.Label("Label2: [2, 1, 4]", 2, 1, 4)
    label2.set_scale(2)
    point_viz.add(label2)

    label3 = viz.Label("Label3: [4, 2, 1]", 4, 2, 1)
    label3.set_scale(3)
    point_viz.add(label3)
    # [doc-etag-lidar-scan-viz-labels]

    point_viz.camera.dolly(-100)

    # visualize
    point_viz.update()
    point_viz.run()


    # ===============================================
    print("Ex 4.0:\tOverlay 2D Images and 2D Labels")

    # [doc-stag-overlay-images-labels]
    # Adding image 1 with aspect ratio preserved
    img = viz.Image()
    img_data = make_checker_board(10, (2, 4))
    mask_data = np.zeros((30, 30, 4))
    mask_data[:15, :15] = np.array([1, 0, 0, 1])
    img.set_mask(mask_data)
    img.set_image(img_data)
    ypos = (0, 0.5)
    xlen = (ypos[1] - ypos[0]) * img_data.shape[1] / img_data.shape[0]
    xpos = (0, xlen)
    img.set_position(*xpos, *ypos)
    img.set_hshift(-0.5)
    point_viz.add(img)

    # Adding Label for image 1: positioned at bottom left corner
    img_label = viz.Label("ARRrrr!", 0.25, 0.5)
    img_label.set_rgba((1.0, 1.0, 0.0, 1))
    img_label.set_scale(2)
    point_viz.add(img_label)

    # Adding image 2: positioned to the right of the window
    img2 = viz.Image()
    img_data2 = make_checker_board(10, (4, 2))
    mask_data2 = np.zeros((30, 30, 4))
    mask_data2[15:25, 15:25] = np.array([0, 1, 0, 0.5])
    img2.set_mask(mask_data2)
    img2.set_image(img_data2)
    ypos2 = (0, 0.5)
    xlen2 = (ypos2[1] - ypos2[0]) * img_data2.shape[1] / img_data2.shape[0]
    xpos2 = (-xlen2, 0)
    img2.set_position(*xpos2, *ypos2)
    img2.set_hshift(1.0)
    point_viz.add(img2)

    # Adding Label for image 2: positioned at top left corner
    img_label2 = viz.Label("Second", 1.0, 0.25, align_top=True, align_right=True)
    img_label2.set_rgba((0.0, 1.0, 1.0, 1))
    img_label2.set_scale(1)
    point_viz.add(img_label2)
    # [doc-etag-overlay-images-labels]

    # visualize
    point_viz.update()
    point_viz.run()


    # ===============================================================
    print("Ex 5.0:\tAdding key handlers: 'R' for random camera dolly")

    # [doc-stag-key-handlers]
    def handle_dolly_random(ctx, key, mods) -> bool:
        if key == 82:  # key R
            dolly_num = random.randrange(-15, 15)
            print(f"Random Dolly: {dolly_num}")
            point_viz.camera.dolly(dolly_num)
            point_viz.update()
        return True

    point_viz.push_key_handler(handle_dolly_random)
    # [doc-etag-key-handlers]

    # visualize
    point_viz.update()
    point_viz.run()
Example 16
def pcap_to_csv(source: client.PacketSource,
                metadata: client.SensorInfo,
                num: int = 0,
                csv_dir: str = ".",
                csv_base: str = "pcap_out",
                csv_ext: str = "csv") -> None:
    """Write scans from a pcap to csv files (one per lidar scan).

    The number of saved lines per csv file is always H x W, which corresponds to
    a full 2D image representation of a lidar scan.

    Each line in a csv file is (for LEGACY profile):

        TIMESTAMP, RANGE (mm), SIGNAL, NEAR_IR, REFLECTIVITY, X (mm), Y (mm), Z (mm)

    If ``csv_ext`` ends in ``.gz``, the file is automatically saved in
    compressed gzip format. :func:`.numpy.loadtxt` can be used to read gzipped
    files transparently back to :class:`.numpy.ndarray`.

    Args:
        source: PacketSource from pcap
        metadata: associated SensorInfo for PacketSource
        num: number of scans to save from pcap to csv files
        csv_dir: path to the directory where csv files will be saved
        csv_base: string to use as the base of the filename for pcap output
        csv_ext: file extension to use, "csv" by default
    """

    # create the output directory if it does not yet exist
    if not os.path.exists(csv_dir):
        os.makedirs(csv_dir)

    def get_fields_info(scan: client.LidarScan) -> Tuple[str, List[str]]:
        """Build the csv header names and per-column format strings."""
        names = 'TIMESTAMP (ns)'
        fmts = ['%d']
        for chan_field in scan.fields:
            names += f', {chan_field}'
            if chan_field in [client.ChanField.RANGE, client.ChanField.RANGE2]:
                names += ' (mm)'
            fmts.append('%d')
        names += ', X (mm), Y (mm), Z (mm)'
        fmts.extend(3 * ['%d'])
        return names, fmts

    field_names: str = ''
    field_fmts: List[str] = []

    # [doc-stag-pcap-to-csv]
    from itertools import islice
    # precompute xyzlut to save computation in a loop
    xyzlut = client.XYZLut(metadata)

    # iterator of LidarScans from pcap, bounded when num is specified
    scans = iter(client.Scans(source))
    if num:
        scans = islice(scans, num)

    for frame_ind, scan in enumerate(scans):

        # lazily derive header names/formats from the first scan's field set
        if not field_names or not field_fmts:
            field_names, field_fmts = get_fields_info(scan)

        # replicate the per-column timestamps down every row
        timestamps = np.tile(scan.timestamp, (scan.h, 1))

        # grab channel data
        channel_data = [scan.field(ch) for ch in scan.fields]

        # use integer mm to avoid loss of precision casting timestamps
        xyz = (xyzlut(scan) * 1000).astype(np.int64)

        # combine everything into one H x W x 8 int64 array for savetxt(),
        # destaggered into "image" (vs. staggered) pixel order
        frame = client.destagger(
            metadata, np.dstack((timestamps, *channel_data, xyz)))

        # write csv out to file
        csv_path = os.path.join(csv_dir,
                                f'{csv_base}_{frame_ind:06d}.{csv_ext}')
        print(f'write frame #{frame_ind}, to file: {csv_path}')

        np.savetxt(csv_path,
                   frame.reshape(-1, frame.shape[2]),
                   fmt=field_fmts,
                   delimiter=',',
                   header='\n'.join([f'frame num: {frame_ind}', field_names]))
Example 17
 def prepare_field_image(scan, key, metadata, destagger=True):
     """Auto-expose one scan field, optionally destaggered for display."""
     img = ae(scan.field(key))
     return client.destagger(metadata, img) if destagger else img
Example 18
def test_destagger_shape_bad(meta) -> None:
    """Check that arrays of the wrong shape are rejected."""
    rows = meta.format.pixels_per_column
    cols = meta.format.columns_per_frame

    # empty dimensions and off-by-one sizes in either axis, 2D and 3D
    bad_shapes = [
        (0, cols),
        (rows, 0, 2),
        (rows, cols + 1),
        (rows - 1, cols),
        (rows, cols - 1, 1),
        (rows + 1, cols, 2),
    ]
    for shape in bad_shapes:
        with pytest.raises(ValueError):
            client.destagger(meta, np.zeros(shape))
Example 19
def test_destagger_shape_good(meta, shape) -> None:
    """Check that (de)staggering preserves shape."""
    data = np.zeros(shape)
    for inverse in (False, True):
        assert client.destagger(meta, data, inverse=inverse).shape == shape