Exemplo n.º 1
0
def main():
    """Build a fused point cloud over 3000 captured frames and save it to disk."""
    camera = sl.Camera()

    # Configure the camera: HD720 (default fps: 60), right-handed Y-up frame,
    # metric units.
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER

    # Open the camera; bail out on any failure.
    if camera.open(init) != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Positional tracking must be running before spatial mapping can start.
    initial_pose = sl.Transform()
    tracking = sl.TrackingParameters(init_pos=initial_pose)
    if camera.enable_tracking(tracking) != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Map as a fused point cloud rather than a triangle mesh.
    mapping = sl.SpatialMappingParameters(map_type=sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_FUSED_POINT_CLOUD)
    if camera.enable_spatial_mapping(mapping) != sl.ERROR_CODE.SUCCESS:
        exit(1)

    point_cloud = sl.FusedPointCloud()
    runtime = sl.RuntimeParameters()
    frames = 0

    # Each successful grab feeds image, depth and pose to the mapping module
    # in the background; nothing extra is required per frame.
    while frames < 3000:
        if camera.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            state = camera.get_spatial_mapping_state()
            print("\rImages captured: {0} / 3000 || {1}".format(frames, state))
            frames += 1

    print("\n")

    # Pull the whole accumulated map out of the SDK and save it.
    print("Extracting Point Cloud...\n")
    status = camera.extract_whole_spatial_map(point_cloud)
    print(repr(status))
    print("Saving Point Cloud...\n")
    point_cloud.save("fpc.obj")

    # Shut everything down in reverse order of start-up.
    camera.disable_spatial_mapping()
    camera.disable_tracking()
    camera.close()
Exemplo n.º 2
0
def main():
    """Live spatial-mapping capture: grab frames until Ctrl-C, then extract,
    filter, texture and save the accumulated mesh.

    Relies on module-level helpers (print_mesh_information, save_filter,
    save_mesh) defined elsewhere in this file.
    """
    cam = sl.Camera()
    init = sl.InitParameters()

    # HD720 @ default fps, right-handed Y-up frame, metric units.
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)  # BUG FIX: was exit() (exit code 0) on a hard failure

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    # BUG FIX: the try/except used to sit *inside* the loop body, so a
    # KeyboardInterrupt delivered between iterations (outside the narrow try
    # span) skipped mesh extraction and camera shutdown entirely.
    try:
        while True:
            cam.grab(runtime)
            cam.request_mesh_async()
    except KeyboardInterrupt:
        # On Ctrl-C: harvest the mesh, stop the modules, post-process, save.
        cam.extract_whole_mesh(pymesh)
        cam.disable_tracking()
        cam.disable_spatial_mapping()

        filter_params = sl.MeshFilterParameters()
        filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
        print("Filtering params : {0}.".format(
            pymesh.filter(filter_params)))

        apply_texture = pymesh.apply_texture(
            sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
        print("Applying texture : {0}.".format(apply_texture))
        print_mesh_information(pymesh, apply_texture)

        save_filter(filter_params)
        save_mesh(pymesh)
        cam.close()
        print("\nFINISH")
        raise
Exemplo n.º 3
0
def main():
    """Open a camera (or SVO file), search for the floor plane and save its mesh."""
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP

    # An optional SVO path on the command line switches to file playback.
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    spatial = sl.SpatialMappingParameters()

    transform = sl.Transform()
    cam.enable_positional_tracking(sl.PositionalTrackingParameters(transform))

    pymesh = sl.Mesh()
    pyplane = sl.Plane()
    reset_frame = sl.Transform()
    found = 0
    print("Processing...")
    attempt = 0
    while attempt < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            err = cam.find_floor_plane(pyplane, reset_frame)
            # Ignore the first 200 frames so tracking can settle before a
            # detection is trusted.
            if attempt > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                break
            attempt += 1

    if not found:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)

    cam.disable_positional_tracking()
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Exemplo n.º 4
0
def main():
    """Map 200 frames from an SVO file, then filter, texture and save the mesh."""
    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    # Open the SVO file as the camera input.
    cam = sl.Camera()
    svo_input = sl.InputType()
    svo_input.set_from_svo_file(filepath)
    init = sl.InitParameters()
    init.input = svo_input
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mapping = sl.SpatialMappingParameters()
    start_pose = sl.Transform()
    tracking = sl.PositionalTrackingParameters(start_pose)

    # Tracking must precede spatial mapping.
    cam.enable_positional_tracking(tracking)
    cam.enable_spatial_mapping(mapping)

    pymesh = sl.Mesh()
    print("Processing...")
    # Feed exactly 200 frames; mapping consumes each grab in the background.
    for _ in range(200):
        cam.grab(runtime)
        cam.request_spatial_map_async()

    cam.extract_whole_spatial_map(pymesh)
    cam.disable_positional_tracking()
    cam.disable_spatial_mapping()

    # Aggressive decimation before texturing.
    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)

    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Exemplo n.º 5
0
def test():
    """Capture 500 frames, build a textured mesh and save it as mesh.obj."""
    zed = sl.Camera()

    # HD720 (default fps: 60), right-handed Y-up frame, metric units.
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER

    zed.open(init)

    # Low-resolution, far-range mapping that produces a textured triangle mesh.
    mapping = sl.SpatialMappingParameters(
        sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
        sl.MAPPING_RANGE.MAPPING_RANGE_FAR)
    mapping.map_type = sl.SPATIAL_MAP_TYPE.SPATIAL_MAP_TYPE_MESH
    mapping.save_texture = True
    # Mesh filtering is a mesh-only feature (not available for fused point cloud).
    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_LOW)

    # Tracking has to be running before mapping starts.
    start_pose = sl.Transform()
    zed.enable_tracking(sl.TrackingParameters(init_pos=start_pose))
    zed.enable_spatial_mapping(mapping)

    mesh = sl.Mesh()
    runtime = sl.RuntimeParameters()
    grabbed = 0

    print("Getting Frames...")
    # Every successful grab hands image, depth and pose to the mapper.
    while grabbed < 500:
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            grabbed += 1

    print("Saving...")
    # Retrieve, post-process and persist the spatial map.
    zed.extract_whole_spatial_map(mesh)
    mesh.filter(filter_params)  # mesh-only operation
    mesh.apply_texture()        # mesh-only operation
    mesh.save("mesh.obj")
    print("Done")
Exemplo n.º 6
0
 def __init__(self):
     """Set up the ZED camera plus tracking, mapping, filter and runtime
     parameter objects, then start the camera and take an initial pose.

     Calls self.start_camera() and self.update_pose(), which are defined
     elsewhere on this class.
     """
     self.maps = list()  # accumulated map objects
     #self.map = []
     self.mesh = sl.Mesh()  # incrementally-updated spatial mesh
     print("Finding ZED...")
     self.zed = sl.Camera()
     self.translation = [0, 0, 0]     # last known camera translation (x, y, z)
     self.orientation = [0, 0, 0, 0]  # last known orientation quaternion
     self.init_params = sl.InitParameters()
     py_transform = sl.Transform()
     trackparms = sl.TrackingParameters(init_pos=py_transform)
     trackparms.enable_pose_smoothing = True  # smooth out pose corrections
     self.tracking_params = trackparms
     # Low resolution / far range keeps mapping cheap; textures are retained.
     self.mapping_params = sl.SpatialMappingParameters(
         resolution=sl.MAPPING_RESOLUTION.MAPPING_RESOLUTION_LOW,
         mapping_range=sl.MAPPING_RANGE.MAPPING_RANGE_FAR,
         save_texture=True)
     self.filter_params = sl.MeshFilterParameters()
     self.runtime_params = sl.RuntimeParameters()
     print("Starting ZED...")
     self.start_camera()
     self.update_pose()
     self.has_requested_map = False
     self.last_update_time = time.time()  # throttle for periodic map requests
Exemplo n.º 7
0
# NOTE(review): this fragment assumes `zed` (sl.Camera) and `init_params`
# (sl.InitParameters) were created earlier in the script — confirm upstream.
init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # Use a right-handed Y-up coordinate system
init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

# Open the camera
err = zed.open(init_params)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)

# Positional tracking must be enabled before spatial mapping.
tracking_parameters = sl.PositionalTrackingParameters()
err = zed.enable_positional_tracking(tracking_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)

# Enable spatial mapping with default parameters.
mapping_parameters = sl.SpatialMappingParameters()
err = zed.enable_spatial_mapping(mapping_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)

# Grab data during 3000 frames
i = 0
py_mesh = sl.Mesh()  # Create a Mesh object
runtime_parameters = sl.RuntimeParameters()
while i < 3000:
    if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
        # In background, spatial mapping will use new images, depth and pose to create and update the mesh. No specific functions are required here.
        mapping_state = zed.get_spatial_mapping_state()
        # Print spatial mapping state
        print("\rImages captured: {0} / 3000 || {1}".format(i, mapping_state))
        i = i + 1  # BUG FIX: the counter was never incremented, so the loop ran forever
Exemplo n.º 8
0
def main():
    """Record an SVO file while spatial-mapping and logging camera poses.

    Grabs frames until Ctrl-C, recording every frame to the SVO path given as
    the sole CLI argument and appending each pose translation to a CSV file.
    On interrupt, extracts/filters/textures the mesh and saves all outputs.

    Relies on module-level helpers (print_mesh_information, save_filter,
    save_mesh, save_position) defined elsewhere in this file.
    """
    if not sys.argv or len(sys.argv) != 2:
        print(
            "Only the path of the output SVO file should be passed as argument."
        )
        exit(1)

    cam = sl.Camera()
    init = sl.InitParameters()

    # HD720 @ default fps, right-handed Y-up frame, metric units.
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER

    # Depth is not needed while recording; disabling it lightens the load.
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)  # BUG FIX: was exit() (exit code 0) on a hard failure

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # Pose/IMU containers for the tracking log.
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')

    pymesh = sl.Mesh()
    print("Processing...")

    # Start SVO recording.
    path_output = sys.argv[1]
    err = cam.enable_recording(
        path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))  # BUG FIX: printed the stale `status` instead of `err`
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0

    # BUG FIX: the try/except used to sit *inside* the loop body, so an
    # interrupt delivered between iterations skipped all cleanup and saving.
    try:
        while True:
            cam.grab(runtime)

            # Count only frames the recorder actually wrote.
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")

            cam.request_mesh_async()
            # Pose of the left eye with reference to the world frame.
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Append translation (rounded to 3 decimals) and timestamp to CSV.
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

    except KeyboardInterrupt:
        # On Ctrl-C: harvest the mesh, stop every module, post-process, save.
        cam.extract_whole_mesh(pymesh)
        cam.disable_tracking()
        cam.disable_spatial_mapping()
        cam.disable_recording()

        filter_params = sl.MeshFilterParameters()
        filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
        print("Filtering params : {0}.".format(
            pymesh.filter(filter_params)))

        apply_texture = pymesh.apply_texture(
            sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
        print("Applying texture : {0}.".format(apply_texture))
        print_mesh_information(pymesh, apply_texture)

        save_filter(filter_params)
        save_mesh(pymesh)
        cam.close()
        position_file.close()
        save_position(path)
        print("\nFINISH")
        raise
Exemplo n.º 9
0
    def process_svo_map_and_pose(self,
                                 map_file="map.obj",
                                 poses_file="poses.txt",
                                 n_frames_to_skip=1,
                                 n_frames_to_trim=0):
        """
        Process the ZED SVO file and write to file:
            - A fused point cloud map
            - The camera pose history

        This function processes the SVO file with the maximum possible depth sensing quality,
        to maximize mapping and pose estimation quality.

        Parameters
        ----------
        map_file: str
            Name of the map file to output. Should be .obj file format.
        poses_file: str
            Name of the poses file to output. Should be .txt format.
        n_frames_to_skip: int, default 1
            If set to 2 or higher, the pose history output will skip frames. Use this to subsample the SVO output.
            For example, skip_frames=2 will include every other frame in the pose file.
            Does not affect the map creation.
        n_frames_to_trim: int, default 0
            Number of frames to drop from the end of the pose history.
            Does not affect the map creation.

        Returns
        -------
        None

        """
        # Initialize the ZED Camera object with the mapping depth quality.
        init_params = self.default_init_params()
        init_params.depth_mode = self.depth_quality_map
        zed = sl.Camera()
        err = zed.open(init_params)

        if err == sl.ERROR_CODE.INVALID_SVO_FILE:
            print("Error processing SVO file: '%s'" % self.svo_path)
            return
        if err != sl.ERROR_CODE.SUCCESS:
            # BUG FIX: previously any other open failure fell through and the
            # rest of the method operated on an unopened camera.
            print("Error opening camera: %s" % repr(err))
            return

        # Initialize positional tracking (required before spatial mapping).
        tracking_parameters = sl.PositionalTrackingParameters()
        zed.enable_positional_tracking(tracking_parameters)

        # Initialize spatial mapping: fused point cloud at high resolution.
        mapping_parameters = sl.SpatialMappingParameters()
        mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.FUSED_POINT_CLOUD
        mapping_parameters.resolution_meter = mapping_parameters.get_resolution_preset(
            sl.MAPPING_RESOLUTION.HIGH)

        # Medium range trades mapping reach against errors like points in the sky.
        mapping_parameters.range_meter = mapping_parameters.get_range_preset(
            sl.MAPPING_RANGE.MEDIUM)
        zed.enable_spatial_mapping(mapping_parameters)

        pose_history = []

        n_frames = zed.get_svo_number_of_frames()

        assert type(n_frames_to_skip) == int and (1 <= n_frames_to_skip < n_frames), \
            "n_frames_to_skip parameter must be an int between 1 and number of frames in the SVO."

        svo_position = 0  # Separate counter: zed.get_svo_position() sometimes skips.
        next_frame = 0  # Next frame whose pose should be recorded (subsampling).

        # Reuse n_frames rather than querying the SDK a second time.
        last_frame = n_frames - n_frames_to_trim

        # SVO processing loop.
        done = False  # FIX: renamed from `exit`, which shadowed the builtin
        while not done:
            # With spatial mapping enabled, zed.grab() updates the map in the background.
            err = zed.grab()
            if err == sl.ERROR_CODE.SUCCESS:

                if svo_position < last_frame and svo_position >= next_frame:
                    print("\r[Processing frame %d of %d]" %
                          (svo_position, n_frames),
                          end='')
                    pose_history.append(get_zed_pose(zed))
                    next_frame += n_frames_to_skip
                svo_position += 1

            elif err == sl.ERROR_CODE.END_OF_SVOFILE_REACHED:
                print("Mapping module: Reached end of SVO file")
                done = True

        # Write outputs and release the camera.
        self.write_poses(pose_history, self.output_path / poses_file)
        self.write_map(zed, self.output_path / map_file)
        zed.close()
Exemplo n.º 10
0
def main():
    """Grab frames and log camera poses until Ctrl-C, then filter, texture
    and save the accumulated mesh together with the pose CSV.

    Relies on the module-level helper save_all defined elsewhere in this file.
    """
    cam = sl.Camera()
    init = sl.InitParameters()

    # HD720 @ default fps, right-handed Y-up frame, metric units.
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)  # BUG FIX: was exit() (exit code 0) on a hard failure

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # Pose/IMU containers for the tracking log.
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')

    pymesh = sl.Mesh()
    print("Processing...")
    # BUG FIX: the try/except used to sit *inside* the loop body, so an
    # interrupt delivered between iterations skipped all cleanup and saving.
    try:
        while True:
            cam.grab(runtime)
            cam.request_mesh_async()
            # Pose of the left eye with reference to the world frame.
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Append translation (rounded to 3 decimals) and timestamp to CSV.
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

    except KeyboardInterrupt:
        # On Ctrl-C: harvest the mesh, stop the modules, post-process, save.
        cam.extract_whole_mesh(pymesh)
        cam.disable_tracking()
        cam.disable_spatial_mapping()

        filter_params = sl.MeshFilterParameters()
        filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
        print("Filtering params : {0}.".format(
            pymesh.filter(filter_params)))

        apply_texture = pymesh.apply_texture(
            sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
        print("Applying texture : {0}.".format(apply_texture))

        cam.close()
        position_file.close()
        save_all(filter_params, pymesh, path)
        print("\nFINISH")
        raise
Exemplo n.º 11
0
def initProcessing(plane, transform):
    """Enable spatial mapping and positional tracking on the global camera.

    The `plane` argument is accepted for interface compatibility only; this
    function does not read it. Uses the module-level `zed` camera object.
    """
    print("Init spatial mapping and camera pose")
    mapping_params = sl.SpatialMappingParameters()
    zed.enable_spatial_mapping(mapping_params)
    track_params = sl.TrackingParameters(transform)
    zed.enable_tracking(track_params)
Exemplo n.º 12
0
def main():
    """Interactive spatial-mapping demo with an OpenGL viewer.

    Continuously grabs frames, shows the left image through gl.GLViewer, and
    toggles spatial mapping on/off whenever the viewer signals a state change.
    On the "stop" toggle the mesh is extracted, filtered, optionally textured
    and saved to mesh_gen.obj. Requires the `gl` viewer module imported
    elsewhere in this file.
    """
    print("Running Spatial Mapping sample ... Press 'q' to quit")

    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.coordinate_units = sl.UNIT.METER  # Set coordinate units
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinates

    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Using SVO file: {0}".format(filepath))
        init_params.set_from_svo_file(filepath)

    # Open the camera
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    # Left-camera intrinsics; the viewer needs them to project the mesh
    # over the live image.
    camera_parameters = zed.get_camera_information(
    ).camera_configuration.calibration_parameters.left_cam

    pymesh = sl.Mesh()  # Current incremental mesh
    image = sl.Mat()  # Left image from camera
    pose = sl.Pose()  # Camera pose tracking data

    viewer = gl.GLViewer()
    viewer.init(camera_parameters, pymesh)

    spatial_mapping_parameters = sl.SpatialMappingParameters()
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF
    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
    mapping_activated = False  # True while the mapping module is running
    last_call = time.time()  # Timestamp of last mesh request

    # Enable positional tracking (required before spatial mapping can start)
    err = zed.enable_positional_tracking()
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit()

    # Set runtime parameters
    runtime = sl.RuntimeParameters()

    while viewer.is_available():
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)

            if mapping_activated:
                mapping_state = zed.get_spatial_mapping_state()
                # Compute elapsed time since the last call of Camera.request_spatial_map_async()
                duration = time.time() - last_call
                # Ask for a mesh update if 500ms elapsed since last request
                if (duration > .5 and viewer.chunks_updated()):
                    zed.request_spatial_map_async()
                    last_call = time.time()

                # Pull the async result into pymesh once it is ready.
                if zed.get_spatial_map_request_status_async(
                ) == sl.ERROR_CODE.SUCCESS:
                    zed.retrieve_spatial_map_async(pymesh)
                    viewer.update_chunks()

            # The viewer returns True when the user toggled start/stop mapping.
            change_state = viewer.update_view(image, pose.pose_data(),
                                              tracking_state, mapping_state)

            if change_state:
                if not mapping_activated:
                    # Start mapping: reset tracking to a fresh origin first.
                    init_pose = sl.Transform()
                    zed.reset_positional_tracking(init_pose)

                    # Configure spatial mapping parameters
                    spatial_mapping_parameters.resolution_meter = sl.SpatialMappingParameters(
                    ).get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
                    spatial_mapping_parameters.use_chunk_only = True
                    spatial_mapping_parameters.save_texture = False  # Set to True to apply texture over the created mesh
                    spatial_mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.MESH

                    # Enable spatial mapping
                    zed.enable_spatial_mapping()

                    # Clear previous mesh data
                    pymesh.clear()
                    viewer.clear_current_mesh()

                    # Start timer
                    last_call = time.time()

                    mapping_activated = True
                else:
                    # Stop mapping: extract whole mesh
                    zed.extract_whole_spatial_map(pymesh)

                    filter_params = sl.MeshFilterParameters()
                    filter_params.set(sl.MESH_FILTER.MEDIUM)
                    # Filter the extracted mesh
                    pymesh.filter(filter_params, True)
                    viewer.clear_current_mesh()

                    # If textures have been saved during spatial mapping, apply them to the mesh
                    if (spatial_mapping_parameters.save_texture):
                        print("Save texture set to : {}".format(
                            spatial_mapping_parameters.save_texture))
                        pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)

                    # Save mesh as an obj file
                    filepath = "mesh_gen.obj"
                    status = pymesh.save(filepath)
                    if status:
                        print("Mesh saved under " + filepath)
                    else:
                        print("Failed to save the mesh under " + filepath)

                    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
                    mapping_activated = False

    # Release resources and shut down modules before closing the camera.
    image.free(memory_type=sl.MEM.CPU)
    pymesh.clear()
    # Disable modules and close camera
    zed.disable_spatial_mapping()
    zed.disable_positional_tracking()
    zed.close()
Exemplo n.º 13
0
def configure_zed_camera(img_capture=True, svo_file=None):
    '''
        Configure zed camera according to the capturing mode.
        - When capturing images, it will be configured in 2k@15fps.
        - When capturing videos, it will be configured in 1080HD@30fps.
        - Depth is disabled (DEPTH_MODE_NONE) in both live branches below;
          ULTRA depth is enabled only when reading from an .svo file, in which
          case positional tracking and spatial mapping are also enabled.
          NOTE(review): an earlier docstring claimed DEPTH_MODE_ULTRA for the
          image-capture mode, but both branches set DEPTH_MODE_NONE — confirm
          which behavior is intended.

    :param img_capture: Flag of the image capturing mode. Default is True. When set False, it will configured as video capturing mode.
    :param svo_file: When given as a .svo file, it will configured as reading file mode.
    :return: configured zed camera object, runtime parameters

    '''
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.coordinate_units = sl.UNIT.UNIT_MILLIMETER
    # init_params.coordinate_system=sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
    init_params.camera_disable_self_calib = False
    # NOTE(review): coordinate_units above is MILLIMETER while this value is
    # annotated "in meter" — confirm which unit depth_minimum_distance expects.
    init_params.depth_minimum_distance = 0.5  #in meter
    init_params.enable_right_side_measure = True
    init_params.sdk_verbose = True

    # Video mode: full-HD at 30 fps; image mode: 2K at 15 fps. Depth off in both.
    if not img_capture:
        init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD1080
        init_params.camera_fps = 30
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    else:
        init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD2K
        init_params.camera_fps = 15
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE

    # SVO playback: turn depth on (ULTRA) and read from file at full speed.
    if svo_file is not None:
        init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA
        init_params.svo_input_filename = str(svo_file)
        init_params.svo_real_time_mode = False  # Don't convert in realtime

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    if svo_file is not None:
        # Positional tracking needs to be enabled before using spatial mapping
        transform = sl.Transform()
        tracking_parameters = sl.TrackingParameters(transform)
        err = zed.enable_tracking(tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            exit(-1)

        # Enable spatial mapping
        mapping_parameters = sl.SpatialMappingParameters()
        err = zed.enable_spatial_mapping(mapping_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            exit(-1)

    # Get camera information (ZED serial number)
    zed_serial = zed.get_camera_information().serial_number
    print("Hello! This is my serial number: {0}".format(zed_serial))

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD

    return zed, runtime
Exemplo n.º 14
0
def main():
    """Collect ZED positional-tracking data and a spatial-mapping mesh.

    Command line: <script> <collection time in seconds> <output path prefix>

    Waits 60 s (so the operator can walk to the start location), then grabs
    frames for the requested duration, logging the left-eye world-frame
    translation to ``<path>.csv`` while a mesh is built asynchronously.
    Finally the mesh is filtered, textured and saved via
    ``save_all_path_arg`` (defined elsewhere in this file).
    """
    if len(sys.argv) != 3:
        print(
            "Please specify collection time (seconds), and path to save files")
        exit()
    max_time = sys.argv[1]
    print(max_time)
    path = sys.argv[2]
    print(path)
    # Delay 60 s so that the user can get to the start location.
    print(
        "\nYou have 60 seconds to get to start location before program will begin"
    )
    time.sleep(60)
    print("\nInitializing camera")

    cam = sl.Camera()
    init = sl.InitParameters()

    # HD720 video mode, right-handed Y-up world frame, metric units.
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    # Positional tracking must be enabled before spatial mapping.
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # For positional tracking:
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()

    pymesh = sl.Mesh()
    print("Camera setup")

    start_time = time.time()

    print("Starting to collect data")

    # FIX: open the CSV with a context manager so the handle is closed even
    # if an SDK call raises (the original leaked the file object), and only
    # log a pose when grab() actually delivered a new frame (the original
    # ignored the grab return code and could log stale poses).
    with open(path + ".csv", 'w+') as position_file:
        while (time.time() - start_time) < float(max_time):
            if cam.grab(runtime) != sl.ERROR_CODE.SUCCESS:
                continue  # no new frame this iteration; skip logging
            cam.request_mesh_async()
            # Pose of the camera's left eye with reference to the world frame.
            cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Log the translation (rounded to mm) and its timestamp as CSV.
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

    print(
        "Finished collecting data, extracting mesh, saving and shutting down camera"
    )
    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    # Apply the strongest decimation/smoothing preset before texturing.
    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(
        sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))

    cam.close()
    save_all_path_arg(filter_params, pymesh, path)
    print("\nFINISH")
Exemplo n.º 15
0
def main():
    """Detect the floor plane in a ZED feed (live or SVO), extract its mesh,
    print the left-camera intrinsics, and retrieve a point cloud.

    Exploratory script: several sections are commented out or disabled with
    ``if False:``, and the warp experiment reads a hard-coded image path.
    """
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.ULTRA  # highest-quality depth mode
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP

    # A single CLI argument is interpreted as a path to an SVO recording.
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    # Measures (point cloud, planes) are expressed in the world frame.
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    spatial = sl.SpatialMappingParameters()

    # Positional tracking is required before plane detection can be used.
    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking)

    pymesh = sl.Mesh()    # receives the extracted floor-plane mesh
    pyplane = sl.Plane()  # filled in by find_floor_plane()
    reset_tracking_floor_frame = sl.Transform()
    found = 0             # set to 1 once a floor plane has been detected
    print("Processing...")
    i = 0




    # Left-camera intrinsics (focal lengths / principal point in pixels)
    # and distortion coefficients, printed for reference.
    calibration_params = cam.get_camera_information().calibration_parameters
    # Focal length of the left eye in pixels
    focal_left_x = calibration_params.left_cam.fx
    focal_left_y = calibration_params.left_cam.fy
    # First radial distortion coefficient
    # k1 = calibration_params.left_cam.disto[0]
    # Translation between left and right eye on z-axis
    # tz = calibration_params.T.z
    # Horizontal field of view of the left eye in degrees
    # h_fov = calibration_params.left_cam.h_fov
    print("fx", focal_left_x)
    print("fy", focal_left_y)
    print("cx", calibration_params.left_cam.cx)
    print("cy", calibration_params.left_cam.cy)
    print("distortion", calibration_params.left_cam.disto)

    

    cloud = sl.Mat()
    # Grab up to 1000 frames, stopping at the first detected floor plane.
    while i < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS :
            err = cam.find_floor_plane(pyplane, reset_tracking_floor_frame)
            if err == sl.ERROR_CODE.SUCCESS:
            # if i > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                # Print the detected floor-plane geometry (world frame).
                print("floor normal",pyplane.get_normal())     #print normal of plane
                print("floor equation", pyplane.get_plane_equation())
                print("plane center", pyplane.get_center())
                print("plane pose", pyplane.get_pose())

                # camera_pose = sl.Pose()
                # py_translation = sl.Translation()
                # tracking_state = cam.get_position(camera_pose)
                # # if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                # rotation = camera_pose.get_rotation_vector()
                # rx = round(rotation[0], 2)
                # ry = round(rotation[1], 2)
                # rz = round(rotation[2], 2)

                # translation = camera_pose.get_translation(py_translation)
                # tx = round(translation.get()[0], 2)
                # ty = round(translation.get()[1], 2)
                # tz = round(translation.get()[2], 2)

                # text_translation = str((tx, ty, tz))
                # text_rotation = str((rx, ry, rz))
                # pose_data = camera_pose.pose_data(sl.Transform())
                # print("translation",text_translation)
                # print("rotation",text_rotation)

                # Keep the colored point cloud of the frame the floor was
                # found in, then stop grabbing.
                cam.retrieve_measure(cloud, sl.MEASURE.XYZRGBA)

                break

                
            # Only reached when no floor plane was found in this frame.
            i+=1

    if found == 0:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)

    # Load a stereo snapshot and keep only its left half (left-eye image).
    # NOTE(review): hard-coded local path — fails on other machines; verify.
    path = "/home/hcl/Documents/ZED/pics/Explorer_HD1080_SN14932_16-24-06.png"
    im = cv2.imread(path)
    h, w = im.shape[0], im.shape[1]
    print("orignal image shape", h,w)
    im = im[:,:int(w/2),:]      #get left image
    # im = cv2.resize(im,(int(im.shape[1]/2),int(im.shape[0]/2)))


    # Destination buffer for the (disabled) warp experiment below.
    warped = np.zeros((h,w,3))

    # Rotation of -80 degrees about the x-axis, converted to radians.
    roll = -80
    roll *= np.pi/180

    Rx = np.matrix([
    [1, 0,0],
    [0, np.cos(roll), -np.sin(roll)],
    [0, np.sin(roll), np.cos(roll)]
    ])

    #point cloud
    # i = 500
    # j = 355
    # Disabled experiment: rotate each valid 3-D point by Rx and splat a
    # white pixel into `warped`. The homogeneous-coordinate divide is still
    # an open question (see inline TODO), hence the `if False:` guard.
    if False:
        point_cloud = sl.Mat()
        cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
        for i in range(100):
            for j in range(1000):        
                point3D = point_cloud.get_value(i,j)
                # x = point3D[0]
                # y = point3D[1]
                # z = point3D[2]
                color = point3D[1][3]
                # NaN/inf in the color channel marks an invalid depth point.
                if ~np.isnan(color) and ~np.isinf(color):
                    xyz = point3D[1][:3].reshape((-1,1))
                    # xyz_hom = np.vstack((xyz,1))
                    print('point3D',point3D)
                    warped_index = Rx @ xyz             #NEED TO USE H**O COORDINATES? divide by last column?
                    print('warped_index',warped_index)
                    RGB = im[j,i,:]
                    RGB = np.dstack(([255],[255],[255]))
                    print('RGB',RGB)
                    warped[int(np.round(warped_index[0]+10)),int(np.round(warped_index[1])),:] = RGB

    cam.disable_positional_tracking()
    # save_mesh(pymesh)
    cam.close()
    print("\nFINISH")