Example #1
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters.
    # Positional tracking needs to be enabled before using spatial mapping
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable spatial mapping
    mapping_parameters = sl.SpatialMappingParameters(map_type=sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_FUSED_POINT_CLOUD)
    err = zed.enable_spatial_mapping(mapping_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Grab data for 3000 frames
    i = 0
    py_fpc = sl.FusedPointCloud()  # Create a FusedPointCloud object
    runtime_parameters = sl.RuntimeParameters()

    while i < 3000:
        # For each new grab, mesh data is updated
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # In the background, spatial mapping will use newly retrieved images, depth and pose to update the mesh
            mapping_state = zed.get_spatial_mapping_state()

            print("\rImages captured: {0} / 3000 || {1}".format(i, mapping_state))

            i = i + 1

    print("\n")

    # Extract and save the point cloud in an .obj file
    print("Extracting Point Cloud...\n")
    err = zed.extract_whole_spatial_map(py_fpc)
    print(repr(err))
    #print("Filtering Mesh...\n")
    #py_mesh.filter(sl.MeshFilterParameters())  # Filter the mesh (remove unnecessary vertices and faces)
    print("Saving Point Cloud...\n")
    py_fpc.save("fpc.obj")

    # Disable tracking and mapping and close the camera
    zed.disable_spatial_mapping()
    zed.disable_tracking()
    zed.close()
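
These snippets assume the ZED Python API has already been imported as sl and that main() is called from an entry point; a minimal sketch of that surrounding boilerplate (an assumption modeled on the official samples):

import pyzed.sl as sl  # ZED Python API (pyzed); assumed import alias

if __name__ == "__main__":
    main()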
Example #2
def main():

    init = sl.InitParameters(camera_resolution=sl.RESOLUTION.RESOLUTION_HD720,
                                 depth_mode=sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE,
                                 coordinate_units=sl.UNIT.UNIT_METER,
                                 coordinate_system=sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
                                 sdk_verbose=True)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    transform = sl.Transform()
    tracking_params = sl.TrackingParameters(transform)
    cam.enable_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = sl.Translation()

    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
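
start_zed and tv.PyTrackingViewer are defined elsewhere in this sample; a minimal sketch of the grab loop start_zed presumably runs (the loop structure and the viewer call are assumptions, not the sample's actual code):

def start_zed(cam, runtime, camera_pose, viewer, py_translation):
    # Hypothetical grab loop: forward each new camera pose to the viewer
    while True:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            tracking_state = cam.get_position(camera_pose)
            if tracking_state == sl.TRACKING_STATE.TRACKING_STATE_OK:
                translation = camera_pose.get_translation(py_translation)
                viewer.update_zed_position(translation)  # hypothetical viewer method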
Example #3
def main():

    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()

    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()

    #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()

            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)

            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            print("\nFINISH")
            raise
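
save_filter, save_mesh and print_mesh_information are helpers defined elsewhere in these samples; a minimal sketch of what they might look like (the output paths and message formats are assumptions):

def save_filter(filter_params):
    # Hypothetical helper: persist the mesh filter parameters for reuse
    filepath = "filter.json"  # assumed output path
    if filter_params.save(filepath):
        print("Filter parameters saved under " + filepath)
    else:
        print("Failed to save the filter parameters under " + filepath)

def save_mesh(pymesh):
    # Hypothetical helper: persist the extracted mesh as an .obj file
    filepath = "mesh.obj"  # assumed output path
    if pymesh.save(filepath):
        print("Mesh saved under " + filepath)
    else:
        print("Failed to save the mesh under " + filepath)

def print_mesh_information(pymesh, apply_texture):
    # Hypothetical helper: report basic mesh statistics once texturing is done
    if apply_texture:
        print("Mesh: {0} vertices, {1} triangles.".format(
            len(pymesh.vertices), len(pymesh.triangles)))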
Example #4
def main():
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init.coordinate_units = sl.UNIT.UNIT_METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD
    spatial = sl.SpatialMappingParameters()

    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)
    cam.enable_tracking(tracking)

    pymesh = sl.Mesh()
    pyplane = sl.Plane()
    reset_tracking_floor_frame = sl.Transform()
    found = 0
    print("Processing...")
    i = 0
    while i < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            err = cam.find_floor_plane(pyplane, reset_tracking_floor_frame)
            if i > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                break
            i += 1

    if found == 0:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)

    cam.disable_tracking()
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Example #5
def test():
    zed = sl.Camera()

    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    zed.open(init_params)

    # Configure spatial mapping parameters
    mapping_parameters = sl.SpatialMappingParameters(
        sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        sl.MAPPING_RANGE.MAPPING_RANGE_FAR)
    mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_MESH
    mapping_parameters.save_texture = True
    filter_params = sl.MeshFilterParameters()  # not available for fused point cloud
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_LOW)  # not available for fused point cloud

    # Enable tracking and mapping
    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    zed.enable_tracking(tracking_parameters)
    zed.enable_spatial_mapping(mapping_parameters)

    mesh = sl.Mesh()  # Create a mesh object
    timer = 0
    runtime_parameters = sl.RuntimeParameters()

    print("Getting Frames...")
    # Grab 500 frames and stop
    while timer < 500:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # When grab() = SUCCESS, a new image, depth and pose is available.
            # Spatial mapping automatically ingests the new data to build the mesh.
            timer += 1

    print("Saving...")
    # Retrieve the spatial map
    zed.extract_whole_spatial_map(mesh)
    # Filter the mesh
    mesh.filter(filter_params)  # not available for fused point cloud
    # Apply the texture
    mesh.apply_texture()  # not available for fused point cloud
    # Save the mesh in .obj format
    mesh.save("mesh.obj")
    print("Done")
Example #6
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    init = sl.InitParameters(svo_input_filename=filepath)
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    for i in range(200):
        cam.grab(runtime)
        cam.request_mesh_async()

    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(
        sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)

    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Example #7
def __init__(self):
    self.maps = list()
    #self.map = []
    self.mesh = sl.Mesh()
    print("Finding ZED...")
    self.zed = sl.Camera()
    self.translation = [0, 0, 0]
    self.orientation = [0, 0, 0, 0]
    self.init_params = sl.InitParameters()
    py_transform = sl.Transform()
    trackparms = sl.TrackingParameters(init_pos=py_transform)
    trackparms.enable_pose_smoothing = True
    self.tracking_params = trackparms
    self.mapping_params = sl.SpatialMappingParameters(
        resolution=sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        mapping_range=sl.MAPPING_RANGE.MAPPING_RANGE_FAR,
        save_texture=True)
    self.filter_params = sl.MeshFilterParameters()
    self.runtime_params = sl.RuntimeParameters()
    print("Starting ZED...")
    self.start_camera()
    self.update_pose()
    self.has_requested_map = False
    self.last_update_time = time.time()
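
start_camera and update_pose are methods defined elsewhere in this class; a minimal sketch of what they presumably do, reusing the attributes set up above (the bodies are assumptions):

def start_camera(self):
    # Hypothetical: open the camera, then enable tracking and spatial mapping
    if self.zed.open(self.init_params) != sl.ERROR_CODE.SUCCESS:
        raise RuntimeError("Could not open the ZED camera")
    self.zed.enable_tracking(self.tracking_params)
    self.zed.enable_spatial_mapping(self.mapping_params)

def update_pose(self):
    # Hypothetical: refresh the cached translation/orientation from the latest pose
    pose = sl.Pose()
    if self.zed.grab(self.runtime_params) == sl.ERROR_CODE.SUCCESS:
        self.zed.get_position(pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        t = pose.get_translation(sl.Translation()).get()
        o = pose.get_orientation(sl.Orientation()).get()
        self.translation = [t[0], t[1], t[2]]
        self.orientation = [o[0], o[1], o[2], o[3]]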
Example #8
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position for 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    # Added: log positions to a CSV file
    path = '/media/nvidia/SD1/position.csv'
    position_file = open(path,'w')
    
    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            position_file.write("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

            # Display the Rotation Matrix
            py_rotationMatrix = zed_pose.get_rotation_matrix()
            position_file.write("Got Rotation Matrix, but did not print\n")

            # Display the Rotation Vector
            py_rotationVector = zed_pose.get_rotation_vector()
            rx = round(py_rotationVector[0], 3)
            ry = round(py_rotationVector[1], 3)
            rz = round(py_rotationVector[2], 3)
            position_file.write("Rotation Vector: Rx: {0}, Ry: {1}, Rz {2}, Timestamp: {3}\n".format(rx, ry, rz, zed_pose.timestamp))

            # Display the Euler Angles
            py_eulerAngles = zed_pose.get_euler_angles()
            ex = round(py_eulerAngles[0], 3)
            ey = round(py_eulerAngles[1], 3)
            ez = round(py_eulerAngles[2], 3)
            position_file.write("EulerAngles: EAx: {0}, EAy: {1}, EAz {2}, Timestamp: {3}\n".format(ex, ey, ez, zed_pose.timestamp))



            
            i = i + 1

    # Close the camera
    zed.close()

    # Close file
    position_file.close()
Example #9
def main():
    imu = PX4Data()
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_VGA  # Use VGA video mode
    init_params.camera_fps = 120  # Set fps at 120
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP_X_FWD
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)


    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Capture frames until 'q' is pressed
    i = 0
    image = sl.Mat()
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode
    prevTimeStamp = 0
    file = open('data/data.txt', 'w')
    key = 0
    depth = sl.Mat()
    point_cloud = sl.Mat()
    pcList = []
    while key != 113:
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
            timestamp = zed.get_timestamp(sl.TIME_REFERENCE.TIME_REFERENCE_CURRENT)  # Get the timestamp at the time the image was captured
            dt = (timestamp - prevTimeStamp) * 1.0 / 10 ** 9
            if dt > 0.03:
                # Get the pose of the left eye of the camera with reference to the world frame
                zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

                # Display the translation and timestamp
                py_translation = sl.Translation()
                gnd_pos = zed_pose.get_translation(py_translation).get()
                tx = round(gnd_pos[0], 3)
                ty = round(gnd_pos[1], 3)
                tz = round(gnd_pos[2], 3)
                print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

                # Display the orientation quaternion
                py_orientation = sl.Orientation()
                quat = zed_pose.get_orientation(py_orientation).get()
                ox = round(quat[0], 3)
                oy = round(quat[1], 3)
                oz = round(quat[2], 3)
                ow = round(quat[3], 3)
                print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

                zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
                img = image.get_data()
                cv2.imwrite('data/images/' + str(timestamp) + '.png', img)

                zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
                print(point_cloud.get_data().shape)
                pc = np.reshape(point_cloud.get_data(), (1, 376, 672, 4))
                pcList.append(pc)

                cv2.imshow("ZED", img)
                key = cv2.waitKey(1)

                prevTimeStamp = timestamp
                print(dt)
                print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(), timestamp))

                file.write('%d '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f \n' % (timestamp, tx, ty, tz, ox, oy, oz, ow,
                                                       imu.acc.x, imu.acc.y, imu.acc.z,
                                                       imu.gyr.x, imu.gyr.y, imu.gyr.z,
                                                       imu.gps.x, imu.gps.y, imu.gps.z,
                                                       imu.vel.x, imu.vel.y, imu.vel.z,
                                                       imu.quat.x, imu.quat.y, imu.quat.z, imu.quat.w))
                i = i + 1

    # Close the camera
    pc = np.concatenate(pcList, axis=0)
    np.save('pc', pc)
    zed.close()
    file.close()
    imu.close()
Example #10
def main():

    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()

    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()

    #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # From Positional Tracking:
    # Track the camera position until KeyboardInterrupt (Ctrl-C)
    #i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()

            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            #print_mesh_information(pymesh, apply_texture)

            #save_filter(filter_params)
            #save_mesh(pymesh)
            cam.close()
            position_file.close()
            #save_position(path)
            save_all(filter_params, pymesh, path)
            print("\nFINISH")
            raise
Example #11
def initProcessing(plane, transform):
    # 'zed' is assumed to be a global sl.Camera that is already open
    print("Init spatial mapping and camera pose")
    spatial_parameters = sl.SpatialMappingParameters()
    zed.enable_spatial_mapping(spatial_parameters)
    tracking_parameters = sl.TrackingParameters(transform)
    zed.enable_tracking(tracking_parameters)
Example #12
def configure_zed_camera(img_capture=True, svo_file=None):
    '''
        Configure the ZED camera according to the capturing mode.
        - When capturing images, it is configured as 2K@15fps + DEPTH_MODE_ULTRA.
          Positional tracking/spatial mapping is also enabled to store the camera's parameters.
        - When capturing videos, it is configured as HD1080@30fps + DEPTH_MODE_NONE.

    :param img_capture: Flag for the image-capturing mode. Default is True. When set to False, the camera is configured for video capturing.
    :param svo_file: When given a .svo file, the camera is configured in file-reading mode.
    :return: configured ZED camera object, runtime parameters
    '''
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.coordinate_units = sl.UNIT.UNIT_MILLIMETER
    # init_params.coordinate_system=sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
    init_params.camera_disable_self_calib = False
    init_params.depth_minimum_distance = 0.5  # in meters
    init_params.enable_right_side_measure = True
    init_params.sdk_verbose = True

    if not img_capture:
        init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD1080
        init_params.camera_fps = 30
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    else:
        init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD2K
        init_params.camera_fps = 15
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE

    if svo_file is not None:
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
        init_params.svo_input_filename = str(svo_file)
        init_params.svo_real_time_mode = False  # Don't convert in realtime

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    if svo_file is not None:
        # Positional tracking needs to be enabled before using spatial mapping
        transform = sl.Transform()
        tracking_parameters = sl.TrackingParameters(transform)
        err = zed.enable_tracking(tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            exit(-1)

        # Enable spatial mapping
        mapping_parameters = sl.SpatialMappingParameters()
        err = zed.enable_spatial_mapping(mapping_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            exit(-1)

    # Get camera information (ZED serial number)
    zed_serial = zed.get_camera_information().serial_number
    print("Hello! This is my serial number: {0}".format(zed_serial))

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD

    return zed, runtime
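
A short usage sketch for the function above (the grab-and-close sequence is an assumption):

zed, runtime = configure_zed_camera(img_capture=True)
image = sl.Mat()
if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
    zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)  # left rectified image
zed.close()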
Example #13
def high_speed(args, viewer):
    config = load_config(args)
    dataset_type = config.get('dataset', 'type')
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    model = create_model(args, config)
    svo_file_path = None  #"/home/adujardin/Downloads/5m.svo" #config.get('zed', 'svo_file_path')

    init_cap_params = sl.InitParameters()
    if svo_file_path:
        print("Loading SVO file " + svo_file_path)
        init_cap_params.svo_input_filename = svo_file_path
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
    init_cap_params.coordinate_units = sl.UNIT.UNIT_METER
    init_cap_params.depth_stabilization = True
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    py_transform = sl.Transform()
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    cap.enable_tracking(tracking_parameters)

    capture = Capture(cap, model.insize)
    predictor = Predictor(model=model, cap=capture)

    capture.start()
    predictor.start()

    fps_time = 0
    main_event = threading.Event()
    viewer.edges = model.edges

    try:
        while not main_event.is_set() and cap.is_opened():
            try:
                image, feature_map, depth = predictor.get()
                humans = get_humans_by_feature(model, feature_map,
                                               detection_thresh,
                                               min_num_keypoints)
                humans_3d = get_humans3d(humans, depth, model)
            except queue.Empty:
                continue
            except Exception as e:
                print(e)
                break
            pilImg = Image.fromarray(image)
            pilImg = draw_humans(
                model.keypoint_names,
                model.edges,
                pilImg,
                humans,
                None,
                visbbox=config.getboolean('predict', 'visbbox'),
            )
            img_with_humans = cv2.cvtColor(np.asarray(pilImg),
                                           cv2.COLOR_RGB2BGR)
            img_with_humans = cv2.resize(
                img_with_humans,
                (700, 400))  #(3 * model.insize[0], 3 * model.insize[1]))
            msg = 'GPU ON' if chainer.backends.cuda.available else 'GPU OFF'
            msg += ' ' + config.get('model_param', 'model_name')
            fps_display = 'FPS: %f' % (1.0 / (time.time() - fps_time))
            str_to_display = msg + " " + fps_display
            cv2.putText(img_with_humans, fps_display, (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.imshow('Pose Proposal Network' + msg, img_with_humans)

            viewer.update_text(str_to_display)
            viewer.update_humans(humans_3d)

            fps_time = time.time()
            key = cv2.waitKey(1)

            # press Esc to exit
            if key == 27:
                main_event.set()
    except Exception as e:
        print(e)
    except KeyboardInterrupt:
        main_event.set()

    capture.stop()
    predictor.stop()

    capture.join()
    predictor.join()
    cap.close()
Example #14
def main():

    if len(sys.argv) != 3:
        print(
            "Please specify collection time (seconds), and path to save files")
        exit()
    max_time = sys.argv[1]
    print(max_time)
    path = sys.argv[2]
    print(path)
    # Delay the program 60 seconds so that the user can get to the start location
    print(
        "\nYou have 60 seconds to get to the start location before the program begins"
    )
    time.sleep(60)
    print("\nInitializing camera")

    cam = sl.Camera()
    init = sl.InitParameters()

    # Use HD720 video mode (default fps: 60)
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    # Set units in meters
    init.coordinate_units = sl.UNIT.UNIT_METER
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    #for Positional Tracking:
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    # Save translation data to a CSV file at the path given by the user
    #path = '/media/nvidia/SD1/translation.csv'
    position_file = open((path + ".csv"), 'w+')

    pymesh = sl.Mesh()
    print("Camera setup")

    #get start time
    start_time = time.time()

    print("Starting to collect data")

    while (time.time() - start_time) < float(max_time):
        cam.grab(runtime)
        cam.request_mesh_async()
        # Get the pose of the left eye of the camera with reference to the world frame
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

        # Display the translation and timestamp
        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
        position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz,
                                                       zed_pose.timestamp))

    print(
        "Finished collecting data, extracting mesh, saving and shutting down camera"
    )
    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(
        sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    #print_mesh_information(pymesh, apply_texture)

    #save_filter(filter_params)
    #save_mesh(pymesh)
    cam.close()
    position_file.close()
    #save_position(path)
    save_all_path_arg(filter_params, pymesh, path)
    print("\nFINISH")
Example #15
def main(abs_k=None, depth=True, throttle_SP=None, steer_SP=None):
    # Create a Camera object
    zed = sl.Camera()

    # Initialize camera parameters
    init_params = init_zed(depth)

    # Open the camera
    zed_cam = zed.open(init_params)

    if zed_cam != sl.ERROR_CODE.SUCCESS:
        print("Error: Zed Camera could not be opened.")
        exit()

    print("")
    print("Zed Camera succesfully opened.")

    # Enable positional tracking
    if depth:
        transform = sl.Transform()
        tracking_parameters = sl.TrackingParameters(init_pos=transform)
        tracking = zed.enable_tracking(tracking_parameters)

        if tracking != sl.ERROR_CODE.SUCCESS:
            exit()

        print("")
        print("Tracking succesfully enabled.")

    runtime_parameters = sl.RuntimeParameters()

    zed_pose = sl.Pose()  # Getting Pose

    mat = sl.Mat()

    with open('/media/ctuxavier/ADATASE730H/zed_recording.csv',
              mode='w') as csv_file:  # Open csv file
        print("Start Recording")
        print("Press Ctrl+C to stop")

        fieldnames = ['Tx', 'Ty', 'Tz', 'Pitch', 'Roll', 'Yaw', 'Timestamp']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()

    with open('/media/ctuxavier/ADATASE730H/img_data.txt', 'w') as data_file:
        data_file.write("grab_timestamp, write_timestamp\n")

    threads = []
    # Init and start image acquisition thread
    ImageAcquisition_t = Thread(target=ImageAcquisition,
                                args=(zed, mat, runtime_parameters))
    ImageAcquisition_t.daemon = True
    threads.append(ImageAcquisition_t)
    ImageAcquisition_t.start()

    if depth:
        # Init and start data acquisition thread
        DataAcquisiton_t = Thread(target=DataAcquisiton,
                                  args=(zed, zed_pose, runtime_parameters))
        DataAcquisiton_t.daemon = True
        threads.append(DataAcquisiton_t)
        DataAcquisiton_t.start()

    # start car detection thread by Pavel Jahoda - for autonomous car chasing
    if carChase:
        car_detection_t = Thread(target=CarDetection,
                                 args=(throttle_SP, steer_SP))
        threads.append(car_detection_t)
        car_detection_t.start()

    # Init and start image segmentation thread
    image_segmentation_t = Thread(target=image_segmentation, args=(abs_k, ))
    image_segmentation_t.daemon = True
    threads.append(image_segmentation_t)
    image_segmentation_t.start()

    try:
        for thread in threads:
            thread.join()
    except (KeyboardInterrupt, SystemExit):
        print("Zed data and images threads closed!")
Example #16
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position for 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
                ax, ay, az))

            # Display the IMU angular velocity
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            imu_orientation = sl.Orientation()
            ox = round(zed_imu.get_orientation(imu_orientation).get()[0], 3)
            oy = round(zed_imu.get_orientation(imu_orientation).get()[1], 3)
            oz = round(zed_imu.get_orientation(imu_orientation).get()[2], 3)
            ow = round(zed_imu.get_orientation(imu_orientation).get()[3], 3)
            print(
                "IMU Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                    ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
Example #17
def main():

    if len(sys.argv) != 2:
        print(
            "Only the path of the output SVO file should be passed as argument."
        )
        exit(1)

    cam = sl.Camera()
    init = sl.InitParameters()

    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # new for SVO
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    ##

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # From Positional Tracking:
    # Track the camera position until KeyboardInterrupt (Ctrl-C)
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")

    #new for SVO
    path_output = sys.argv[1]
    err = cam.enable_recording(
        path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0
    ##

    while True:
        try:
            cam.grab(runtime)

            # new for SVO
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")
            ##

            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            # new for .svo
            cam.disable_recording()
            ##
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)

            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            position_file.close()
            save_position(path)
            print("\nFINISH")
            raise
Example #18
def capture_thread_func():
    global image_np_global, depth_np_global, exit_signal, new_data

    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP
    init_params.camera_fps = 60
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.coordinate_units = sl.UNIT.UNIT_METER
    init_params.svo_real_time_mode = False

    # Open the camera, retrying until it succeeds
    err = zed.open(init_params)
    while err != sl.ERROR_CODE.SUCCESS:
        print(err)
        sleep(1)
        err = zed.open(init_params)

    # Enable positional tracking (with spatial memory) once the camera is open
    tracking_parameters = sl.TrackingParameters(sl.Transform())
    tracking_parameters.enable_spatial_memory = True
    err = zed.enable_tracking(tracking_parameters)

    image_mat = sl.Mat()
    depth_mat = sl.Mat()
    zed_pose = sl.Pose()  # pose buffer for positional tracking
    runtime_parameters = sl.RuntimeParameters()

    while not exit_signal:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_image(image_mat,
                               sl.VIEW.VIEW_LEFT,
                               width=width,
                               height=height)
            zed.retrieve_measure(depth_mat,
                                 sl.MEASURE.MEASURE_XYZRGBA,
                                 width=width,
                                 height=height)

            lock.acquire()
            image_np_global = load_image_into_numpy_array(image_mat)
            depth_np_global = load_depth_into_numpy_array(depth_mat)
            new_data = True
            lock.release()

            # For spatial tracking
            tracking_state = zed.get_position(
                zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

            if tracking_state == sl.TRACKING_STATE.TRACKING_STATE_OK:
                # Getting spatial position
                py_translation = sl.Translation()
                tx = round(
                    zed_pose.get_translation(py_translation).get()[0], 2)
                ty = round(
                    zed_pose.get_translation(py_translation).get()[1], 2)
                tz = round(
                    zed_pose.get_translation(py_translation).get()[2], 2)

                # Getting spatial orientation
                py_orientation = sl.Orientation()
                ox = round(
                    zed_pose.get_orientation(py_orientation).get()[0], 2)
                oy = round(
                    zed_pose.get_orientation(py_orientation).get()[1], 2)
                oz = round(
                    zed_pose.get_orientation(py_orientation).get()[2], 2)
                ow = round(
                    zed_pose.get_orientation(py_orientation).get()[3], 2)

                # Getting spatial orientation
                rotation = zed_pose.get_rotation_vector()
                rx = round(rotation[0], 2)
                ry = round(rotation[1], 2)
                rz = round(rotation[2], 2)

                rx *= (180 / math.pi)
                ry *= (180 / math.pi)
                rz *= (180 / math.pi)

                rx = round(rx, 2)
                ry = round(ry, 2)
                rz = round(rz, 2)

                # Storing some position data
                voi.coord = [tx, ty, tz]
                voi.rotation = [rx, ry, rz]
                voi.orientation = [ox, oy, oz, ow]

        sleep(0.01)

    zed.close()
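
load_image_into_numpy_array and load_depth_into_numpy_array are helpers from the surrounding sample; a minimal sketch, assuming they simply copy the sl.Mat contents into numpy arrays:

import numpy as np

def load_image_into_numpy_array(image_mat):
    # Hypothetical helper: copy the BGRA left image into a numpy array
    return np.copy(image_mat.get_data())

def load_depth_into_numpy_array(depth_mat):
    # Hypothetical helper: copy the XYZRGBA measure into a numpy array
    return np.copy(depth_mat.get_data())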