Example #1
def test_write_info(metadata: client.SensorInfo) -> None:
    """Check modifying metadata."""
    metadata.hostname = ""
    metadata.sn = ""
    metadata.fw_rev = ""
    metadata.mode = client.LidarMode.MODE_UNSPEC
    metadata.prod_line = ""
    metadata.format.columns_per_frame = 0
    metadata.format.columns_per_packet = 0
    metadata.format.pixels_per_column = 0
    metadata.format.column_window = (0, 0)
    metadata.format.pixel_shift_by_row = []
    metadata.beam_azimuth_angles = []
    metadata.beam_altitude_angles = []
    metadata.imu_to_sensor_transform = numpy.zeros((4, 4))
    metadata.lidar_to_sensor_transform = numpy.zeros((4, 4))
    metadata.extrinsic = numpy.zeros((4, 4))
    metadata.lidar_origin_to_beam_origin_mm = 0.0

    assert metadata == client.SensorInfo()

    with pytest.raises(TypeError):
        metadata.mode = 1  # type: ignore
    with pytest.raises(TypeError):
        metadata.imu_to_sensor_transform = numpy.zeros((4, 5))
    with pytest.raises(TypeError):
        metadata.lidar_to_sensor_transform = numpy.zeros((3, 4))
    with pytest.raises(TypeError):
        metadata.extrinsic = numpy.zeros(16)
    with pytest.raises(TypeError):
        metadata.beam_altitude_angles = 1  # type: ignore
    with pytest.raises(TypeError):
        metadata.beam_azimuth_angles = ["foo"]  # type: ignore
Example #2
def main() -> None:
    descr = """Visualize pcap or sensor data using simple viz bindings."""

    epilog = """When reading data from a sensor, this will autoconfigure the udp
        destination unless -x is used."""

    parser = argparse.ArgumentParser(description=descr, epilog=epilog)

    required = parser.add_argument_group('one of the following is required')
    group = required.add_mutually_exclusive_group(required=True)
    group.add_argument('--sensor', metavar='HOST', help='sensor hostname')
    group.add_argument('--pcap', metavar='PATH', help='path to pcap file')
    parser.add_argument('--meta', metavar='PATH', help='path to metadata json')
    parser.add_argument('--lidar-port', type=int, help='lidar port for sensor')
    parser.add_argument('-x',
                        '--no-auto-dest',
                        action='store_true',
                        help='do not auto configure udp destination')

    args = parser.parse_args()

    if args.sensor:
        hostname = args.sensor
        if args.lidar_port or (not args.no_auto_dest):
            config = client.SensorConfig()
            if args.lidar_port:
                config.udp_port_lidar = args.lidar_port
            print("Configuring sensor...")
            client.set_config(hostname,
                              config,
                              udp_dest_auto=(not args.no_auto_dest))
        config = client.get_config(hostname)

        print("Initializing...")
        scans = client.Scans.stream(hostname,
                                    config.udp_port_lidar or 7502,
                                    complete=False)
        rate = None

    elif args.pcap:
        import ouster.pcap as pcap

        if args.meta:
            metadata_path = args.meta
        else:
            print("Deducing metadata based on pcap name. "
                  "To provide a different metadata path, use --meta")
            metadata_path = os.path.splitext(args.pcap)[0] + ".json"

        with open(metadata_path) as f:
            info = client.SensorInfo(f.read())
        scans = client.Scans(pcap.Pcap(args.pcap, info))
        rate = 1.0

    SimpleViz(scans.metadata, rate).run(scans)
Example #3
def read_metadata(metadata_path: str) -> client.SensorInfo:
    """Read metadata json file as :class:`.client.SensorInfo` object

    Args:
        metadata_path: path to json file with sensor info

    Returns:
        :class:`.SensorInfo`: object initialized from the given json file
    """
    with open(metadata_path, 'r') as f:
        return client.SensorInfo(f.read())
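A minimal usage sketch for the helper above; the metadata filename here is just a placeholder for any valid sensor metadata json:

info = read_metadata("os-992011000121_meta.json")
print(info.mode)                          # lidar mode parsed from the file
print(info.format.columns_per_frame)      # e.g. 1024 for a 1024x10 mode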
Example #4
def main():
    """Pcap examples runner."""
    examples = {
        "plot-xyz-points": pcap_display_xyz_points,
        "2d-viewer": pcap_2d_viewer,
        "read-packets": pcap_read_packets,
        "plot-one-scan": pcap_show_one_scan,
        "pcap-to-csv": pcap_to_csv,
        "pcap-to-pcd": pcap_to_pcd,
        "pcap-to-las": pcap_to_las,
        "open3d-one-scan": pcap_3d_one_scan,
    }

    description = "Ouster Python SDK Pcap examples. The EXAMPLE must be one of:\n" + str.join(
        '\n  ', examples.keys())

    parser = argparse.ArgumentParser(
        description=description, formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('pcap_path', metavar='PCAP', help='path to pcap file')
    parser.add_argument('metadata_path',
                        metavar='METADATA',
                        help='path to metadata json')
    parser.add_argument('example',
                        metavar='EXAMPLE',
                        choices=examples.keys(),
                        help='name of the example to run')
    parser.add_argument('--scan-num',
                        type=int,
                        default=1,
                        help='index of scan to use')
    args = parser.parse_args()

    try:
        example = examples[args.example]
    except KeyError:
        print(f"No such example: {args.example}")
        print(description)
        exit(1)

    if not args.metadata_path or not os.path.exists(args.metadata_path):
        print(f"Metadata file does not exist: {args.metadata_path}")
        exit(1)

    print(f'example: {args.example}')

    with open(args.metadata_path, 'r') as f:
        metadata = client.SensorInfo(f.read())
    source = pcap.Pcap(args.pcap_path, metadata)

    with closing(source):
        example(source, metadata, args.scan_num)  # type: ignore
Example #5
def main() -> None:
    import argparse
    import os
    import ouster.pcap as pcap

    descr = """Example visualizer using the open3d library.

    Visualize either pcap data (specified using --pcap) or a running sensor
    (specified using --sensor). If no metadata file is specified, this will look
    for a file with the same name as the pcap with the '.json' extension, or
    query it directly from the sensor.

    Visualizing a running sensor requires the sensor to be configured and
    sending lidar data to the default UDP port (7502) on the host machine.
    """

    parser = argparse.ArgumentParser(description=descr)
    parser.add_argument('--pause', action='store_true', help='start paused')
    parser.add_argument('--start', type=int, help='skip to frame number')
    parser.add_argument('--meta', metavar='PATH', help='path to metadata json')

    required = parser.add_argument_group('one of the following is required')
    group = required.add_mutually_exclusive_group(required=True)
    group.add_argument('--sensor', metavar='HOST', help='sensor hostname')
    group.add_argument('--pcap', metavar='PATH', help='path to pcap file')

    args = parser.parse_args()

    if args.sensor:
        scans = client.Scans.stream(args.sensor, metadata=args.meta)
    elif args.pcap:
        pcap_path = args.pcap
        metadata_path = args.meta or os.path.splitext(pcap_path)[0] + ".json"

        with open(metadata_path, 'r') as f:
            metadata = client.SensorInfo(f.read())

        source = pcap.Pcap(pcap_path, metadata)
        scans = client.Scans(source)
        consume(scans, args.start or 0)

    try:
        viewer_3d(scans, paused=args.pause)
    except (KeyboardInterrupt, StopIteration):
        pass
    finally:
        scans.close()
Example #6
def test_parse_info() -> None:
    """Sanity check parsing from json."""
    with pytest.raises(ValueError):
        client.SensorInfo('/')
    with pytest.raises(ValueError):
        client.SensorInfo('')
    with pytest.raises(ValueError):
        client.SensorInfo('{  }')
    with pytest.raises(ValueError):
        client.SensorInfo('{ "lidar_mode": "1024x10" }')

    # TODO: this should actually fail unless *all* parameters needed to
    # unambiguously interpret a sensor data stream are present
    metadata = {
        'lidar_mode': '1024x10',
        'beam_altitude_angles': [0] * 64,
        'beam_azimuth_angles': [0] * 64,
        'lidar_to_sensor_transform': list(range(16))
    }
    info = client.SensorInfo(json.dumps(metadata))

    # check that data format defaults are populated
    assert info.format.pixels_per_column == 64
    assert info.format.columns_per_frame == 1024
    assert info.format.columns_per_packet > 0
    assert info.format.column_window[0] == 0
    assert info.format.column_window[1] == 1023
    assert len(info.format.pixel_shift_by_row) == 64

    # the lidar_to_sensor_transform json is interpreted as a 4x4 matrix in
    # row-major order. Numpy also uses row-major storage order.
    assert numpy.array_equal(info.lidar_to_sensor_transform,
                             numpy.array(range(16)).reshape(4, 4))
    assert numpy.array_equal(info.extrinsic, numpy.identity(4))

    metadata['lidar_origin_to_beam_origin_mm'] = 'foo'
    with pytest.raises(RuntimeError):
        client.SensorInfo(json.dumps(metadata))
Example #7
def main():
    """Pcap examples runner."""
    examples = {
        "open3d-one-scan": pcap_3d_one_scan,
        "plot-xyz-points": pcap_display_xyz_points,
        "pcap-to-csv": pcap_to_csv,
        "pcap-to-las": pcap_to_las,
        "pcap-to-pcd": pcap_to_pcd,
        "pcap-to-ply": pcap_to_ply,
        "query-scan": pcap_query_scan,
        "read-packets": pcap_read_packets,
    }

    description = "Ouster Python SDK Pcap examples. The EXAMPLE must be one of:\n  " + str.join(
        '\n  ', examples.keys())

    parser = argparse.ArgumentParser(
        description=description, formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('pcap_path', metavar='PCAP', help='path to pcap file')
    parser.add_argument('metadata_path',
                        metavar='METADATA',
                        help='path to metadata json')
    parser.add_argument('example',
                        metavar='EXAMPLE',
                        choices=examples.keys(),
                        help='name of the example to run')
    parser.add_argument('--scan-num',
                        type=int,
                        default=1,
                        help='index of scan to use')
    args = parser.parse_args()

    try:
        example = examples[args.example]
    except KeyError:
        print(f"No such example: {args.example}")
        print(description)
        exit(1)

    if not args.metadata_path or not os.path.exists(args.metadata_path):
        print(f"Metadata file does not exist: {args.metadata_path}")
        exit(1)

    with open(args.metadata_path, 'r') as f:
        metadata = client.SensorInfo(f.read())

    if (metadata.format.udp_profile_lidar !=
            client.UDPProfileLidar.PROFILE_LIDAR_LEGACY
            and metadata.format.udp_profile_lidar !=
            client.UDPProfileLidar.PROFILE_LIDAR_RNG19_RFL8_SIG16_NIR16
            and args.example != 'query-scan'):
        print(
            f"This pcap example is only for pcaps of sensors in LEGACY or SINGLE RETURN mode. Exiting..."
        )
        exit(1)

    print(f'example: {args.example}')

    source = pcap.Pcap(args.pcap_path, metadata)

    with closing(source):
        example(source, metadata, args.scan_num)  # type: ignore
Example #8
"""
TODO:
- separate set scan / update is annoying
- a proxy run()/quit() on ls_viz would be useful
- maybe: ls_viz could initialize underlying viz + expose it
- point_viz.run() twice is broken
- ideally, run() would open/close window
- auto camera movement example?
"""

from ouster import client, pcap
from ouster.sdk import viz

meta_path = "/mnt/aux/test_drives/OS1_128_2048x10.json"
pcap_path = "/mnt/aux/test_drives/OS1_128_2048x10.pcap"

meta = client.SensorInfo(open(meta_path).read())
packets = pcap.Pcap(pcap_path, meta)
scans = iter(client.Scans(packets))

point_viz = viz.PointViz("Example Viz")
ls_viz = viz.LidarScanViz(meta, point_viz)

ls_viz.scan = next(scans)
ls_viz.draw()
print("Showing first frame, close visuzlier window to continue")
point_viz.run()

ls_viz.scan = next(scans)
ls_viz.draw()
print("Showing second frame, close visuzlier window to continue")
point_viz.run()
Example #9
def metadata():
    meta_path = os.path.join(DATA_DIR, "os-992011000121_meta.json")
    with open(meta_path, 'r') as f:
        return client.SensorInfo(f.read())
Example #10
def main():
    """PointViz visualizer examples."""

    parser = argparse.ArgumentParser(
        description=main.__doc__,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('pcap_path',
                        nargs='?',
                        metavar='PCAP',
                        help='path to pcap file')
    parser.add_argument('meta_path',
                        nargs='?',
                        metavar='METADATA',
                        help='path to metadata json')

    args = parser.parse_args()

    pcap_path = os.getenv("SAMPLE_DATA_PCAP_PATH", args.pcap_path)
    meta_path = os.getenv("SAMPLE_DATA_JSON_PATH", args.meta_path)

    if not pcap_path or not meta_path:
        print("ERROR: Please add SAMPLE_DATA_PCAP_PATH and SAMPLE_DATA_JSON_PATH to" +
            " environment variables or pass <pcap_path> and <meta_path>")
        sys.exit()

    print(f"Using:\n\tjson: {meta_path}\n\tpcap: {pcap_path}")

    # Getting data sources
    meta = client.SensorInfo(open(meta_path).read())
    packets = pcap.Pcap(pcap_path, meta)
    scans = iter(client.Scans(packets))

    # ==============================
    print("Ex 0: Empty Point Viz")

    # [doc-stag-empty-pointviz]
    # Creating a point viz instance
    point_viz = viz.PointViz("Example Viz")
    viz.add_default_controls(point_viz)

    # ... add objects here

    # update internal objects buffers and run visualizer
    point_viz.update()
    point_viz.run()
    # [doc-etag-empty-pointviz]


    # =========================================================================
    print("Ex 1.0:\tImages and Labels: the Image object and 2D Image set_position() - height-normalized screen coordinates")

    label_top = viz.Label("[0, 1]", 0.5, 0.0, align_top=True)
    label_top.set_scale(2)
    point_viz.add(label_top)

    label_bot = viz.Label("[0, -1]", 0.5, 1, align_top=False)
    label_bot.set_scale(2)
    point_viz.add(label_bot)

    # [doc-stag-image-pos-center]
    img = viz.Image()
    img.set_image(np.full((10, 10), 0.5))
    img.set_position(-0.5, 0.5, -0.5, 0.5)
    point_viz.add(img)
    # [doc-etag-image-pos-center]

    # visualize
    point_viz.update()
    point_viz.run()

    # =========================================================================
    print("Ex 1.1:\tImages and Labels: Window-aligned images with 2D Image set_hshift() - width-normalized [-1, 1] horizontal shift")

    # [doc-stag-image-pos-left]
    # move img to the left
    img.set_position(0, 1, -0.5, 0.5)
    img.set_hshift(-1)
    # [doc-etag-image-pos-left]

    # visualize
    point_viz.update()
    point_viz.run()

    # [doc-stag-image-pos-right]
    # move img to the right
    img.set_position(-1, 0, -0.5, 0.5)
    img.set_hshift(1)
    # [doc-etag-image-pos-right]

    # visualize
    point_viz.update()
    point_viz.run()

    # [doc-stag-image-pos-right-bottom]
    # move img to the right bottom
    img.set_position(-1, 0, -1, 0)
    img.set_hshift(1)
    # [doc-etag-image-pos-right-bottom]

    # visualize
    point_viz.update()
    point_viz.run()


    # remove_objs(point_viz, [label_top, label_mid, label_bot, img])
    remove_objs(point_viz, [label_top, label_bot, img])

    # =======================================
    print("Ex 1.2:\tImages and Labels: Lidar Scan Fields as Images")

    # [doc-stag-scan-fields-images]
    scan = next(scans)

    img_aspect = (meta.beam_altitude_angles[0] -
                  meta.beam_altitude_angles[-1]) / 360.0
    img_screen_height = 0.4 # [0..2]
    img_screen_len = img_screen_height / img_aspect

    # prepare field data
    ranges = scan.field(client.ChanField.RANGE)
    ranges = client.destagger(meta, ranges)
    ranges = np.divide(ranges, np.amax(ranges), dtype=np.float32)

    signal = scan.field(client.ChanField.SIGNAL)
    signal = client.destagger(meta, signal)
    signal = np.divide(signal, np.amax(signal), dtype=np.float32)

    # creating Image viz elements
    range_img = viz.Image()
    range_img.set_image(ranges)
    # top center position
    range_img.set_position(-img_screen_len / 2, img_screen_len / 2,
                           1 - img_screen_height, 1)
    point_viz.add(range_img)

    signal_img = viz.Image()
    signal_img.set_image(signal)
    # bottom center position
    signal_img.set_position(-img_screen_len / 2, img_screen_len / 2, -1,
                            -1 + img_screen_height)
    point_viz.add(signal_img)
    # [doc-etag-scan-fields-images]

    # visualize
    point_viz.update()
    point_viz.run()

    print("Ex 1.3:\tImages and Labels: Adding labels")

    # [doc-stag-scan-fields-images-labels]
    range_label = viz.Label(str(client.ChanField.RANGE), 0.5, 0, align_top=True)
    range_label.set_scale(1)
    point_viz.add(range_label)

    signal_label = viz.Label(str(client.ChanField.SIGNAL),
                            0.5, 1 - img_screen_height / 2,
                            align_top=True)
    signal_label.set_scale(1)
    point_viz.add(signal_label)
    # [doc-etag-scan-fields-images-labels]

    # visualize
    point_viz.update()
    point_viz.run()

    # ===============================================================
    print("Ex 2.0:\tPoint Clouds: As Structured Points")

    # [doc-stag-scan-structured]
    cloud_scan = viz.Cloud(meta)
    cloud_scan.set_range(scan.field(client.ChanField.RANGE))
    cloud_scan.set_key(signal)
    point_viz.add(cloud_scan)
    # [doc-etag-scan-structured]

    # visualize
    point_viz.update()
    point_viz.run()

    remove_objs(point_viz, [cloud_scan])

    # ===============================================================
    print("Ex 2.1:\tPoint Clouds: As Unstructured Points")

    # [doc-stag-scan-unstructured]
    # transform scan data to 3d points
    xyzlut = client.XYZLut(meta)
    xyz = xyzlut(scan.field(client.ChanField.RANGE))

    cloud_xyz = viz.Cloud(xyz.shape[0] * xyz.shape[1])
    cloud_xyz.set_xyz(np.reshape(xyz, (-1, 3)))
    cloud_xyz.set_key(signal.ravel())
    point_viz.add(cloud_xyz)
    # [doc-etag-scan-unstructured]

    point_viz.camera.dolly(150)

    # visualize
    point_viz.update()
    point_viz.run()

    # =======================================================
    print("Ex 2.2:\tPoint Clouds: Custom Axes Helper as Unstructured Points")

    # [doc-stag-axes-helper]
    # basis vectors
    x_ = np.array([1, 0, 0]).reshape((-1, 1))
    y_ = np.array([0, 1, 0]).reshape((-1, 1))
    z_ = np.array([0, 0, 1]).reshape((-1, 1))

    axis_n = 100
    line = np.linspace(0, 1, axis_n).reshape((1, -1))

    # basis vector to point cloud
    axis_points = np.hstack((x_ @ line, y_ @ line, z_ @ line)).transpose()

    # colors for basis vectors
    axis_color_mask = np.vstack((
        np.full((axis_n, 4), [1, 0.1, 0.1, 1]),
        np.full((axis_n, 4), [0.1, 1, 0.1, 1]),
        np.full((axis_n, 4), [0.1, 0.1, 1, 1])))

    cloud_axis = viz.Cloud(axis_points.shape[0])
    cloud_axis.set_xyz(axis_points)
    cloud_axis.set_key(np.full(axis_points.shape[0], 0.5))
    cloud_axis.set_mask(axis_color_mask)
    cloud_axis.set_point_size(3)
    point_viz.add(cloud_axis)
    # [doc-etag-axes-helper]

    point_viz.camera.dolly(50)

    # visualize
    point_viz.update()
    point_viz.run()

    remove_objs(point_viz, [
        range_img, range_label, signal_img, signal_label, cloud_axis, cloud_xyz
    ])

    # ===============================================================
    print("Ex 2.3:\tPoint Clouds: the LidarScanViz class")

    # [doc-stag-lidar-scan-viz]
    # Creating LidarScan visualizer (3D point cloud + field images on top)
    ls_viz = viz.LidarScanViz(meta, point_viz)

    # adding scan to the lidar scan viz
    ls_viz.scan = scan

    # refresh viz data
    ls_viz.draw()

    # visualize
    # update() is not needed here because LidarScanViz calls it internally
    point_viz.run()
    # [doc-etag-lidar-scan-viz]

    # ===================================================
    print("Ex 3.0:\tAugmenting point clouds with 3D Labels")

    # [doc-stag-lidar-scan-viz-labels]
    # Adding 3D Labels
    label1 = viz.Label("Label1: [1, 2, 4]", 1, 2, 4)
    point_viz.add(label1)

    label2 = viz.Label("Label2: [2, 1, 4]", 2, 1, 4)
    label2.set_scale(2)
    point_viz.add(label2)

    label3 = viz.Label("Label3: [4, 2, 1]", 4, 2, 1)
    label3.set_scale(3)
    point_viz.add(label3)
    # [doc-etag-lidar-scan-viz-labels]

    point_viz.camera.dolly(-100)

    # visualize
    point_viz.update()
    point_viz.run()


    # ===============================================
    print("Ex 4.0:\tOverlay 2D Images and 2D Labels")

    # [doc-stag-overlay-images-labels]
    # Adding image 1 with aspect ratio preserved
    img = viz.Image()
    img_data = make_checker_board(10, (2, 4))
    mask_data = np.zeros((30, 30, 4))
    mask_data[:15, :15] = np.array([1, 0, 0, 1])
    img.set_mask(mask_data)
    img.set_image(img_data)
    ypos = (0, 0.5)
    xlen = (ypos[1] - ypos[0]) * img_data.shape[1] / img_data.shape[0]
    xpos = (0, xlen)
    img.set_position(*xpos, *ypos)
    img.set_hshift(-0.5)
    point_viz.add(img)

    # Adding Label for image 1: positioned at bottom left corner
    img_label = viz.Label("ARRrrr!", 0.25, 0.5)
    img_label.set_rgba((1.0, 1.0, 0.0, 1))
    img_label.set_scale(2)
    point_viz.add(img_label)

    # Adding image 2: positioned to the right of the window
    img2 = viz.Image()
    img_data2 = make_checker_board(10, (4, 2))
    mask_data2 = np.zeros((30, 30, 4))
    mask_data2[15:25, 15:25] = np.array([0, 1, 0, 0.5])
    img2.set_mask(mask_data2)
    img2.set_image(img_data2)
    ypos2 = (0, 0.5)
    xlen2 = (ypos2[1] - ypos2[0]) * img_data2.shape[1] / img_data2.shape[0]
    xpos2 = (-xlen2, 0)
    img2.set_position(*xpos2, *ypos2)
    img2.set_hshift(1.0)
    point_viz.add(img2)

    # Adding Label for image 2: positioned at top left corner
    img_label2 = viz.Label("Second", 1.0, 0.25, align_top=True, align_right=True)
    img_label2.set_rgba((0.0, 1.0, 1.0, 1))
    img_label2.set_scale(1)
    point_viz.add(img_label2)
    # [doc-etag-overlay-images-labels]

    # visualize
    point_viz.update()
    point_viz.run()


    # ===============================================================
    print("Ex 5.0:\tAdding key handlers: 'R' for random camera dolly")

    # [doc-stag-key-handlers]
    def handle_dolly_random(ctx, key, mods) -> bool:
        if key == 82:  # key R
            dolly_num = random.randrange(-15, 15)
            print(f"Random Dolly: {dolly_num}")
            point_viz.camera.dolly(dolly_num)
            point_viz.update()
        return True

    point_viz.push_key_handler(handle_dolly_random)
    # [doc-etag-key-handlers]

    # visualize
    point_viz.update()
    point_viz.run()
Example #11
def main() -> None:
    descr = """Visualize pcap or sensor data using simple viz bindings."""

    epilog = """When reading data from a sensor, this will currently not
    configure the sensor or query it for the port to listen on. You will need to
    set the sensor port and destination settings separately.
    """

    parser = argparse.ArgumentParser(description=descr, epilog=epilog)

    required = parser.add_argument_group('one of the following is required')
    group = required.add_mutually_exclusive_group(required=True)
    group.add_argument('--sensor', metavar='HOST', help='sensor hostname')
    group.add_argument('--pcap', metavar='PATH', help='path to pcap file')

    parser.add_argument('--meta', metavar='PATH', help='path to metadata json')
    parser.add_argument('--lidar-port', type=int, default=7502)

    args = parser.parse_args()

    if args.sensor:
        print("Initializing...")
        scans = client.Scans.stream(args.sensor,
                                    args.lidar_port,
                                    complete=False)
    elif args.pcap:
        import ouster.pcap as pcap

        if args.meta:
            metadata_path = args.meta
        else:
            print("Deducing metadata based on pcap name. "
                  "To provide a different metadata path, use --meta")
            metadata_path = os.path.splitext(args.pcap)[0] + ".json"

        with open(metadata_path) as f:
            info = client.SensorInfo(f.read())
        scans = client.Scans(
            pcap.Pcap(args.pcap, info, rate=1.0, lidar_port=args.lidar_port))

    viz = PyViz(scans.metadata)

    def run() -> None:
        try:
            for scan in scans:
                viz.draw(scan)
        finally:
            # signal main thread to exit
            viz.quit()

    try:
        print("Starting client thread...")
        client_thread = threading.Thread(target=run, name="Client")
        client_thread.start()

        print("Starting rendering loop...")
        viz.loop()
    finally:
        scans.close()
        client_thread.join()

    print("Done")
Example #12
def meta_2_0():
    meta_path = path.join(DATA_DIR, f"{TESTS['legacy-2.0']}.json")
    with open(meta_path, 'r') as f:
        return client.SensorInfo(f.read())
Example #13
def meta(base_name: str):
    meta_path = path.join(DATA_DIR, f"{base_name}.json")
    with open(meta_path, 'r') as f:
        return client.SensorInfo(f.read())