def main():

    init = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720,
                             depth_mode=sl.DEPTH_MODE.PERFORMANCE,
                             coordinate_units=sl.UNIT.METER,
                             coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP,
                             sdk_verbose=True)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    transform = sl.Transform()
    tracking_params = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = sl.Translation()

    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
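
start_zed() is not shown in this example. A minimal stand-in, assuming only the pyzed.sl calls used above (PyTrackingViewer update calls are omitted because its interface is not shown here), could poll the pose on a background thread while glutMainLoop() runs:

import threading

def start_zed(cam, runtime, camera_pose, viewer, py_translation):
    # Hypothetical helper: grab frames and report the tracked translation
    # until grab() stops succeeding.
    def run():
        while True:
            if cam.grab(runtime) != sl.ERROR_CODE.SUCCESS:
                break
            if cam.get_position(camera_pose) == sl.POSITIONAL_TRACKING_STATE.OK:
                t = camera_pose.get_translation(py_translation).get()
                print("tx={:.3f} ty={:.3f} tz={:.3f}".format(t[0], t[1], t[2]))
    threading.Thread(target=run, daemon=True).start()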
Example #2
def init_params(menu, cam_id=0):
    global save_dir
    zed = EasyDict({})
    zed.cam = sl.Camera()
    zed.mat = EasyDict({
        'pose': sl.Pose(),
        'translation': sl.Translation(),
        'transform': sl.Transform(),
        'image': sl.Mat(),  # image_map
        'depth': sl.Mat(),  # depth_map
        'point_cloud': sl.Mat(),
        'sensors': sl.SensorsData()  # sensors_data
    })

    zed.param = EasyDict({
        'init':
        sl.InitParameters(
            camera_resolution=mode.resolution[menu.cam.resolution],
            depth_mode=mode.depth[menu.mode.depth],
            coordinate_units=mode.unit[menu.unit],
            coordinate_system=mode.coordinate_system[menu.coordinate_system],
            depth_minimum_distance=menu.depth_range.min,
            depth_maximum_distance=menu.depth_range.max,
            sdk_verbose=verbose),
        'runtime':
        sl.RuntimeParameters(sensing_mode=mode.sensing[menu.mode.sensing]),
        'tracking':
        sl.PositionalTrackingParameters(zed.mat.transform)
    })

    #######
    zed.param.init.set_from_camera_id(cam_id)
    save_dir = save_dir_fmt.format(cam_id)
    return zed
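
init_params() depends on module-level menu/mode lookup tables, plus verbose and save_dir_fmt, none of which are shown here. A minimal sketch of what they might contain (key names are assumptions, mapped onto ZED SDK 3.x enums):

from easydict import EasyDict

verbose = True
save_dir_fmt = 'capture_cam{}'  # hypothetical directory pattern

mode = EasyDict({
    'resolution': {'HD720': sl.RESOLUTION.HD720, 'HD1080': sl.RESOLUTION.HD1080},
    'depth': {'performance': sl.DEPTH_MODE.PERFORMANCE, 'ultra': sl.DEPTH_MODE.ULTRA},
    'unit': {'m': sl.UNIT.METER, 'mm': sl.UNIT.MILLIMETER},
    'coordinate_system': {'right_y_up': sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP},
    'sensing': {'standard': sl.SENSING_MODE.STANDARD, 'fill': sl.SENSING_MODE.FILL},
})

menu = EasyDict({
    'cam': {'resolution': 'HD720'},
    'mode': {'depth': 'performance', 'sensing': 'standard'},
    'unit': 'm',
    'coordinate_system': 'right_y_up',
    'depth_range': {'min': 0.3, 'max': 10.0},
})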
Example #3
    def update_pose(self):
        pose = sl.Pose()
        state = self.zed.get_position(pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        if state == sl.TRACKING_STATE.TRACKING_STATE_OK:
            t = pose.get_translation()
            self.translation = [t.get()[0], t.get()[1], t.get()[2]]
            o = pose.get_orientation()
            self.orientation = [o.get()[0], o.get()[1], o.get()[2], o.get()[3]]
    def pop(self, local_pose, world_pose, image, depth, objects):
        '''
        pop
            pop data from the FIFO system
        
        Parameters:
            local_pose (sl.Pose): pose of the camera in camera reference frame at the objects' timestamp
            world_pose (sl.Pose): pose of the camera in world reference frame at the objects' timestamp
            image (sl.Mat): image data at the objects' timestamp
            depth (sl.Mat): depth data at the objects' timestamp
            objects (sl.Objects): objects in the past
        '''
        objects = sl.Objects()
        local_pose = sl.Pose()
        world_pose = sl.Pose()

        if self.objects_tracked_queue:
            tracked_merged_obj = self.objects_tracked_queue[0]
            if (self.init_queue_ts.data_ns == 0):
                self.init_queue_ts = tracked_merged_obj.timestamp

            targetTS_ms = tracked_merged_obj.timestamp.get_milliseconds()

            local_pose = self.find_closest_local_pose_from_ts(targetTS_ms)
            world_pose = self.find_closest_world_pose_from_ts(targetTS_ms)

            if WITH_IMAGE_RETENTION:
                tmp_image = self.find_closest_image_from_ts(targetTS_ms)
                tmp_image.copy_to(image)
                tmp_image.free(sl.MEM.CPU)
                self.image_map_ms[targetTS_ms].free(sl.MEM.CPU)
                del self.image_map_ms[targetTS_ms]

                tmp_depth = self.find_closest_depth_from_ts(targetTS_ms)
                tmp_depth.copy_to(depth)
                tmp_depth.free(sl.MEM.CPU)
                self.depth_map_ms[targetTS_ms].free(sl.MEM.CPU)
                del self.depth_map_ms[targetTS_ms]

            objects = tracked_merged_obj
            self.objects_tracked_queue.popleft()

        return local_pose, world_pose, image, depth, objects
Example #5
    def update_pose_z(self):
        pose = sl.Pose()
        state = self.zed.get_position(pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        if state == sl.TRACKING_STATE.TRACKING_STATE_OK:
            t = pose.get_translation()
            self.translation = [t.get()[0], t.get()[1], t.get()[2]]
            o = pose.get_orientation()
            euler = self.quaternion_to_euler(o.get()[0], o.get()[1],
                                             o.get()[2], o.get()[3])
            # Keep only the yaw component of the camera orientation
            new_quat = self.euler_to_quaternion(euler[2], 0, 0)
            self.orientation = new_quat
    def find_closest_world_pose_from_ts(self, timestamp):
        '''
        find_closest_world_pose_from_ts
            Find the sl.Pose (in world reference frame) that matches the given timestamp

        Parameters:
            timestamp (int): timestamp in ms (or at least in the same unit as self.cam_world_pose_map_ms)
        Return:
            The matching sl.Pose
        '''
        pose = sl.Pose()
        if timestamp in self.cam_world_pose_map_ms:
            pose = self.cam_world_pose_map_ms[timestamp]
        return pose
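
update_pose_z() above calls quaternion_to_euler and euler_to_quaternion, which are not part of this snippet. A minimal sketch of those two methods of the same class, using the standard conversion formulas and Python's math module (the (yaw, pitch, roll) argument order is inferred from the call site):

    def quaternion_to_euler(self, x, y, z, w):
        # Returns [roll, pitch, yaw] in radians.
        roll = math.atan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
        pitch = math.asin(max(-1.0, min(1.0, 2.0 * (w * y - z * x))))
        yaw = math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
        return [roll, pitch, yaw]

    def euler_to_quaternion(self, yaw, pitch, roll):
        # Returns a quaternion as [x, y, z, w].
        cy, sy = math.cos(yaw / 2), math.sin(yaw / 2)
        cp, sp = math.cos(pitch / 2), math.sin(pitch / 2)
        cr, sr = math.cos(roll / 2), math.sin(roll / 2)
        return [sr * cp * cy - cr * sp * sy,
                cr * sp * cy + sr * cp * sy,
                cr * cp * sy - sr * sp * cy,
                cr * cp * cy + sr * sp * sy]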
Example #7
def get_zed_pose(zed, verbose=False):
    """

    Parameters
    ----------
    zed: sl.Camera
        The ZED camera object
    verbose: bool
        If true, will print the translation + orientation to the console

    Returns
    -------
    list
        Pose as [time, tx, ty, tz, ox, oy, oz, ow]
        time is given in seconds.
        The camera position is [tx, ty, tz]
        And orientation is the quaternion [ox, oy, oz, ow]

    """
    # Call zed.grab() each time before calling this function
    zed_pose = sl.Pose()

    # Get the pose of the camera relative to the world frame
    state = zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    # Display translation and timestamp
    py_translation = sl.Translation()
    tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
    ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
    tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
    time_nsec = zed_pose.timestamp.get_nanoseconds()
    time_sec = float(time_nsec) / 1e9
    if verbose:
        print(
            "Translation: tx: {0}, ty:  {1}, tz:  {2}, timestamp: {3}".format(
                tx, ty, tz, time_sec))
    # Display orientation quaternion
    py_orientation = sl.Orientation()
    ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
    oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
    oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
    ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
    if verbose:
        print("Orientation: ox: {0}, oy:  {1}, oz: {2}, ow: {3}\n".format(
            ox, oy, oz, ow))
    return [time_sec, tx, ty, tz, ox, oy, oz, ow]
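
A hedged usage sketch: grab() must succeed before each call, with positional tracking already enabled on the camera:

runtime = sl.RuntimeParameters()
if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
    t, tx, ty, tz, ox, oy, oz, ow = get_zed_pose(zed, verbose=True)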
Example #8
def main():
    imu = PX4Data()
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_VGA  # Use VGA video mode
    init_params.camera_fps = 120  # Set fps at 120
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP_X_FWD
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)


    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Capture frames until 'q' (113) is pressed
    i = 0
    image = sl.Mat()
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode
    prevTimeStamp = 0
    file = open('data/data.txt', 'w')
    key = 0
    depth = sl.Mat()
    point_cloud = sl.Mat()
    pcList = []
    while key != 113:
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
            timestamp = zed.get_timestamp(sl.TIME_REFERENCE.TIME_REFERENCE_CURRENT)  # Get the current timestamp
            dt = (timestamp - prevTimeStamp) * 1.0 / 10 ** 9
            if dt > 0.03:
                # Get the pose of the left eye of the camera with reference to the world frame
                zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

                # Display the translation and timestamp
                py_translation = sl.Translation()
                gnd_pos = zed_pose.get_translation(py_translation).get()
                tx = round(gnd_pos[0], 3)
                ty = round(gnd_pos[1], 3)
                tz = round(gnd_pos[2], 3)
                print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

                # Display the orientation quaternion
                py_orientation = sl.Orientation()
                quat = zed_pose.get_orientation(py_orientation).get()
                ox = round(quat[0], 3)
                oy = round(quat[1], 3)
                oz = round(quat[2], 3)
                ow = round(quat[3], 3)
                print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

                zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
                img = image.get_data()
                cv2.imwrite('data/images/' + str(timestamp) + '.png', img)

                zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
                print(point_cloud.get_data().shape)
                pc = np.reshape(point_cloud.get_data(), (1, 376, 672, 4))
                pcList.append(pc)

                cv2.imshow("ZED", img)
                key = cv2.waitKey(1)

                prevTimeStamp = timestamp
                print(dt)
                print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(), timestamp))

                file.write('%d '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f \n' % (timestamp, tx, ty, tz, ox, oy, oz, ow,
                                                       imu.acc.x, imu.acc.y, imu.acc.z,
                                                       imu.gyr.x, imu.gyr.y, imu.gyr.z,
                                                       imu.gps.x, imu.gps.y, imu.gps.z,
                                                       imu.vel.x, imu.vel.y, imu.vel.z,
                                                       imu.quat.x, imu.quat.y, imu.quat.z, imu.quat.w))
                i = i + 1

    # Save the accumulated point clouds, then close the camera and files
    pc = np.concatenate(pcList, axis=0)
    np.save('pc', pc)
    zed.close()
    file.close()
    imu.close()
def extract_zed_svofile(svo_input_path, output_path, other_frames=None):
    # Set output path to save frames
    output_path = Path(output_path)
    if not os.path.exists(output_path / 'left'):
        os.makedirs(output_path / 'left')

    if other_frames is not None:
        assert other_frames == 'rgb_right' or other_frames == 'depth_left', 'Parameter:: other_frames = [rgb_right | depth_left]'

    zed, runtime = zutils.configure_zed_camera(img_capture=False,
                                               svo_file=svo_input_path)

    # Start SVO conversion to AVI/SEQUENCE
    sys.stdout.write("Converting SVO... Use Ctrl-C to interrupt conversion.\n")

    nb_frames = zed.get_svo_number_of_frames()
    # Prepare single image containers
    left_image = sl.Mat()
    right_image = sl.Mat()
    depth_image = sl.Mat()
    camera_pose = sl.Pose()
    pose_filename = output_path / 'pose.txt'
    calib_filename = output_path / 'calib.txt'

    with open(pose_filename, 'w') as file:
        while True:
            if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
                svo_position = zed.get_svo_position()

                #Retrieve SVO images
                zed.retrieve_image(left_image, sl.VIEW.VIEW_LEFT)

                if other_frames == 'rgb_right':
                    zed.retrieve_image(right_image, sl.VIEW.VIEW_RIGHT)
                elif other_frames == 'depth_left':
                    zed.retrieve_measure(
                        depth_image, sl.MEASURE.MEASURE_DEPTH)  # depth uint16

                # Generate file names
                filename1 = output_path / 'left' / (
                    "%s.png" % str(svo_position - 1).zfill(6))

                # Save Left images
                cv2.imwrite(str(filename1), left_image.get_data())

                if other_frames is not None:
                    if other_frames == 'rgb_right':
                        if not os.path.exists(output_path / 'right'):
                            os.makedirs(output_path / 'right')
                        # Save right images
                        filename2 = output_path / 'right' / (
                            "%s.png" % str(svo_position - 1).zfill(6))
                        cv2.imwrite(str(filename2), right_image.get_data())
                    else:
                        if not os.path.exists(output_path / 'depth'):
                            os.makedirs(output_path / 'depth')
                        # Save depth images (convert to uint16)
                        filename2 = output_path / 'depth' / (
                            "%s.png" % str(svo_position - 1).zfill(6))
                        cv2.imwrite(str(filename2),
                                    depth_image.get_data().astype(np.uint16))

                # Save pose data into txt file
                zed.get_position(camera_pose,
                                 sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
                rotation_matrix = camera_pose.get_rotation_matrix().r
                translation = camera_pose.get_translation().get()
                #print(rotation_matrix)
                #print(translation)
                RT = np.hstack((rotation_matrix, translation.reshape((3, 1))))
                # cam_param['euler_angle'] = camera_pose.get_euler_angles(radian=True)
                # cam_param['quaternion_orientation'] = camera_pose.get_orientation().get()
                # cam_param['rotation_matrix'] = camera_pose.get_rotation_matrix().r
                # cam_param['rodrigues_rotation'] = camera_pose.get_rotation_vector()
                # cam_param['camera_position'] = camera_pose.get_translation().get()
                file.write(",".join(map(str, RT.reshape(-1))) + "\n")

                cam_param = zed.get_camera_information(
                ).calibration_parameters.left_cam
                fx, fy = cam_param.fx, cam_param.fy
                cx, cy = cam_param.cx, cam_param.cy
                np.savetxt(calib_filename, (fx, fy, cx, cy))

                # Display progress
                progress_bar((svo_position + 1) / nb_frames * 100, 30)

                # Check if we have reached the end of the video
                if svo_position >= (nb_frames - 1):  # End of SVO
                    sys.stdout.write(
                        "\nSVO end has been reached. Exiting now.\n")
                    break

    zed.close()
    return 0
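
A hedged usage sketch (paths are placeholders; configure_zed_camera comes from the project's zutils module):

extract_zed_svofile('recording.svo', 'frames_out', other_frames='depth_left')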
Example #10
def main():

    if len(sys.argv) != 2:
        print(
            "Only the path of the output SVO file should be passed as argument."
        )
        exit(1)

    cam = sl.Camera()
    init = sl.InitParameters()

    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # new for SVO
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    ##

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    #from  Positional Tracking:
    # Track the camera position until Keyboard Interrupt (ctrl-C)
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")

    #new for SVO
    path_output = sys.argv[1]
    err = cam.enable_recording(
        path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0
    ##

    while True:
        try:
            cam.grab(runtime)

            # new for SVO
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")
            ##

            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            # new for .svo
            cam.disable_recording()
            ##
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)

            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            position_file.close()
            save_position(path)
            print("\nFINISH")
            raise
def main(abs_k=None, depth=True, throttle_SP=None, steer_SP=None):
    # Create a Camera object
    zed = sl.Camera()

    # Initialize camera parameters
    init_params = init_zed(depth)

    # Open the camera
    zed_cam = zed.open(init_params)

    if zed_cam != sl.ERROR_CODE.SUCCESS:
        print("Error: Zed Camera could not be opened.")
        exit()

    print("")
    print("Zed Camera succesfully opened.")

    # # Enable positional tracking
    # if depth:
    #     transform = sl.Transform()
    #     tracking_parameters = sl.TrackingParameters(init_pos=transform)
    #     tracking = zed.enable_tracking(tracking_parameters)
    #
    #     if tracking != sl.ERROR_CODE.SUCCESS:
    #         exit()
    #
    #     print("")
    #     print("Tracking succesfully enabled.")

    runtime_parameters = sl.RuntimeParameters()

    zed_pose = sl.Pose()  # Getting Pose

    mat = sl.Mat()

    with open('/media/ctuxavier/ADATASE730H/zed_recording.csv',
              mode='w') as csv_file:  # Open csv file
        print("Start Recording")
        print("Press Ctrl+C to stop")

        fieldnames = ['Tx', 'Ty', 'Tz', 'Pitch', 'Roll', 'Yaw', 'Timestamp']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()

    with open('/media/ctuxavier/ADATASE730H/img_data.txt', 'w') as data_file:
        data_file.write("grab_timestamp, write_timestamp\n")

    threads = []
    # Init and start image acquisition thread
    ImageAcquisition_t = Thread(target=ImageAcquisition,
                                args=(zed, mat, runtime_parameters))
    ImageAcquisition_t.daemon = True
    threads.append(ImageAcquisition_t)
    ImageAcquisition_t.start()

    # if depth:
    #     # Init and start data acquisition thread
    #     DataAcquisiton_t = Thread(target=DataAcquisiton, args=(zed, zed_pose, runtime_parameters))
    #     DataAcquisiton_t.daemon = True
    #     threads.append(DataAcquisiton_t)
    #     DataAcquisiton_t.start()

    # start car detection thread by Pavel Jahoda - for autonomous car chasing
    if carChase:
        car_detection_t = Thread(target=CarDetection,
                                 args=(throttle_SP, steer_SP))
        car_detection_t.daemon = True
        threads.append(car_detection_t)
        car_detection_t.start()

    # Init and start image segmentation thread
    # image_segmentation_t = Thread(target=image_segmentation, args=(abs_k, ))
    # image_segmentation_t.daemon = True
    # threads.append(image_segmentation)
    # image_segmentation_t.start()

    try:
        for thread in threads:
            thread.join()
    except (KeyboardInterrupt, SystemExit):
        print("Zed data and images threads closed!")
Example #12
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    # Added: write camera positions to a CSV file
    path = '/media/nvidia/SD1/position.csv'
    position_file = open(path,'w')
    
    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            position_file.write("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

            # Display the rotation matrix
            py_rotationMatrix = zed_pose.get_rotation_matrix()
            position_file.write("Got Rotation Matrix, but did not print\n")

            # Display the rotation vector
            py_rotationVector = zed_pose.get_rotation_vector()
            rx = round(py_rotationVector[0], 3)
            ry = round(py_rotationVector[1], 3)
            rz = round(py_rotationVector[2], 3)
            position_file.write("Rotation Vector: Rx: {0}, Ry: {1}, Rz {2}, Timestamp: {3}\n".format(rx, ry, rz, zed_pose.timestamp))

            # Display the Euler angles
            py_eulerAngles = zed_pose.get_euler_angles()
            ex = round(py_eulerAngles[0], 3)
            ey = round(py_eulerAngles[1], 3)
            ez = round(py_eulerAngles[2], 3)
            position_file.write("EulerAngles: EAx: {0}, EAy: {1}, EAz {2}, Timestamp: {3}\n".format(ex, ey, ez, zed_pose.timestamp))

            i = i + 1

    # Close the camera
    zed.close()

    # Close file
    position_file.close()
def main():
    global stop_signal
    thread_list = []
    signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # tracking enable
        py_transform = cams.transform_list[index]
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        index = index + 1

    #Start camera threads
    for index in range(0, len(cams.zed_list)):
        if cams.zed_list[index].is_opened():
            thread_list.append(
                threading.Thread(target=grab_run, args=(
                    cams,
                    index,
                )))
            thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width // 2
        image_size.height = image_size.height // 2
        # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr = get_camera_intrintic_info(cams.zed_list[index])
        filename = path + cams.name_list[index] + "-camera-intrinsics.txt"
        df = pd.DataFrame(cam_intr)
        df.to_csv(filename, sep=' ', header=None, index=None)

    #*******************************************************************

    index = 0
    take_cam_id = 0
    for cam in cameras:
        get_cam_data(cams, index, take_cam_id)
        index += 1

    stop_signal = True
    #*******************************************************************
    '''
    key = ' '
    take_cam_id=0
    while key != 113 :

        index=0
        image_ocv_cat=None
        depth_image_ocv_cat=None
        for cam in cameras:
            image_ocv, depth_image_ocv=get_cam_color_depth(cams,index)
            if image_ocv_cat is None:
                image_ocv_cat=image_ocv
                depth_image_ocv_cat=depth_image_ocv
            else:
                image_ocv_cat=np.hstack([image_ocv_cat,image_ocv])
                depth_image_ocv_cat=np.hstack([depth_image_ocv_cat,depth_image_ocv])
            index+=1

        cv2.imshow("Image", image_ocv_cat)
        cv2.imshow("Depth", depth_image_ocv_cat)

        key = cv2.waitKey(10)
        if key == 114 or  key == 82:#R
            index = 0
            for cam in cameras:
                # process_key_event(cams,key,index)
                get_cam_data(cams, index,take_cam_id)
                index+=1
            take_cam_id=take_cam_id+1

    cv2.destroyAllWindows()
    '''
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #14
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util


# Initial Setup
class vars_of_interest():
    def __init__(self):
        self.coord = []
        self.rotation = []
        self.target_list = []
        self.orientation = []


voi = vars_of_interest()
zed_pose = sl.Pose()
debug = sys.argv[1]
zed_robot = basis.essentials(debug)


def load_image_into_numpy_array(image):
    ar = image.get_data()
    ar = ar[:, :, 0:3]
    (im_height, im_width, channels) = image.get_data().shape
    return np.array(ar).reshape((im_height, im_width, 3)).astype(np.uint8)


def load_depth_into_numpy_array(depth):
    ar = depth.get_data()
    ar = ar[:, :, 0:4]
    (im_height, im_width, channels) = depth.get_data().shape
    # The snippet is truncated here; by analogy with load_image_into_numpy_array
    # above, the missing return is presumably:
    return np.array(ar).reshape((im_height, im_width, 4)).astype(np.float32)
Example #15
def save_parameters(zed, filename):
    print("Saving parameters of the camera >>> ", filename)

    def get_camera_parameters(cam_calibration):

        res_param = {}

        left = cam_calibration.left_cam
        right = cam_calibration.right_cam

        # Save focal length (in pixel)
        res_param['focal_length'] = {}
        res_param['focal_length']['left'] = [left.fx, left.fy]
        res_param['focal_length']['right'] = [right.fx, right.fy]

        # Save distortion factor
        res_param['distortion_factor'] = {}
        res_param['distortion_factor']['left'] = left.disto
        res_param['distortion_factor']['right'] = right.disto

        # Optical center (in pixel)
        res_param['optical_center'] = {}
        res_param['optical_center']['left'] = [left.cx, left.cy]
        res_param['optical_center']['right'] = [right.cx, right.cy]

        # Save field of view (in degrees) [diagonal, horizontal, vertical]
        res_param['fov'] = {}
        res_param['fov']['left'] = [left.d_fov, left.h_fov, left.v_fov]
        res_param['fov']['right'] = [right.d_fov, right.h_fov, right.v_fov]

        # Save extrinsic parameters between two sensors
        res_param['Rotation'] = cam_calibration.R
        res_param['Translation'] = cam_calibration.T

        return res_param

    cam_param = {}

    # Save image information
    cam_param['resolution'] = zed.get_resolution()
    cam_param['fps'] = zed.get_current_fps()
    cam_param['timestamp'] = zed.get_timestamp(
        sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)
    cam_param['confidence_threshold'] = zed.get_confidence_threshold()
    cam_param['depth_range'] = [
        zed.get_depth_min_range_value(),
        zed.get_depth_max_range_value()
    ]

    # Save camera settings
    # For more details, see:
    # file:///home/sokim/Desktop/data_collection/zed-python-api/doc/build/html/video.html?highlight=auto%20white%20balance#pyzed.sl.CAMERA_SETTINGS
    gain = zed.get_camera_settings(sl.CAMERA_SETTINGS.CAMERA_SETTINGS_GAIN)
    brightness = zed.get_camera_settings(
        sl.CAMERA_SETTINGS.CAMERA_SETTINGS_BRIGHTNESS)
    contrast = zed.get_camera_settings(
        sl.CAMERA_SETTINGS.CAMERA_SETTINGS_CONTRAST)
    exposure = zed.get_camera_settings(
        sl.CAMERA_SETTINGS.CAMERA_SETTINGS_EXPOSURE)
    hue = zed.get_camera_settings(sl.CAMERA_SETTINGS.CAMERA_SETTINGS_HUE)
    saturation = zed.get_camera_settings(
        sl.CAMERA_SETTINGS.CAMERA_SETTINGS_SATURATION)
    whitebalance = zed.get_camera_settings(
        sl.CAMERA_SETTINGS.CAMERA_SETTINGS_WHITEBALANCE)

    cam_param['settings'] = [
        gain, brightness, contrast, exposure, hue, saturation, whitebalance
    ]

    # Save camera intrinsic and extrinsic parameters
    cam_info_raw = zed.get_camera_information().calibration_parameters_raw
    cam_info = zed.get_camera_information().calibration_parameters

    cam_param['unrectified'] = get_camera_parameters(cam_info_raw)
    cam_param['rectified'] = get_camera_parameters(cam_info)

    # Save camera position
    camera_pose = sl.Pose()
    zed.get_position(camera_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
    cam_param['euler_angle'] = camera_pose.get_euler_angles(radian=True)
    cam_param['quaternion_orientation'] = camera_pose.get_orientation().get()
    cam_param['rotation_matrix'] = camera_pose.get_rotation_matrix().r
    cam_param['rodrigues_rotation'] = camera_pose.get_rotation_vector()
    cam_param['camera_position'] = camera_pose.get_translation().get()

    with open(filename + '_param.txt', 'w') as file:
        file.write(str(cam_param))
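
A hedged usage sketch (the camera should be open and have grabbed at least one frame so the pose query is meaningful):

zed = sl.Camera()
if zed.open(sl.InitParameters()) == sl.ERROR_CODE.SUCCESS:
    if zed.grab(sl.RuntimeParameters()) == sl.ERROR_CODE.SUCCESS:
        save_parameters(zed, 'zed_left')  # writes zed_left_param.txt
    zed.close()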
Example #16
def main():

    if len(sys.argv) != 3:
        print(
            "Please specify collection time (seconds), and path to save files")
        exit()
    max_time = sys.argv[1]
    print(max_time)
    path = sys.argv[2]
    print(path)
    #delay program 60 sec, so that user can get to start location
    print(
        "\nYou have 60 seconds to get to start location before program will begin"
    )
    time.sleep(60)
    print("\nInitializing camera")

    cam = sl.Camera()
    init = sl.InitParameters()

    # Use HD720 video mode
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    # Set units in meters
    init.coordinate_units = sl.UNIT.UNIT_METER
    status = cam.open(init)

    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    #for Positional Tracking:
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    #temporary file to save translation data to, until you know the filename (given from the user)
    #path = '/media/nvidia/SD1/translation.csv'
    position_file = open((path + ".csv"), 'w+')

    pymesh = sl.Mesh()
    print("Camera setup")

    #get start time
    start_time = time.time()

    print("Starting to collect data")

    while (time.time() - start_time) < float(max_time):
        cam.grab(runtime)
        cam.request_mesh_async()
        # Get the pose of the left eye of the camera with reference to the world frame
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
        cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

        # Display the translation and timestamp
        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
        position_file.write("{0},{1},{2},{3}\n".format(tx, ty, tz,
                                                       zed_pose.timestamp))

    print(
        "Finished collecting data, extracting mesh, saving and shutting down camera"
    )
    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(
        sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    #print_mesh_information(pymesh, apply_texture)

    #save_filter(filter_params)
    #save_mesh(pymesh)
    cam.close()
    position_file.close()
    #save_position(path)
    save_all_path_arg(filter_params, pymesh, path)
    print("\nFINISH")
Example #17
def main():
    print("Running Spatial Mapping sample ... Press 'q' to quit")

    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.coordinate_units = sl.UNIT.METER  # Set coordinate units
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinates

    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Using SVO file: {0}".format(filepath))
        init_params.set_from_svo_file(filepath)

    # Open the camera
    status = zed.open(init_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    # Get camera parameters
    camera_parameters = zed.get_camera_information(
    ).camera_configuration.calibration_parameters.left_cam

    pymesh = sl.Mesh()  # Current incremental mesh
    image = sl.Mat()  # Left image from camera
    pose = sl.Pose()  # Camera pose tracking data

    viewer = gl.GLViewer()
    viewer.init(camera_parameters, pymesh)

    spatial_mapping_parameters = sl.SpatialMappingParameters()
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF
    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
    mapping_activated = False
    last_call = time.time()  # Timestamp of last mesh request

    # Enable positional tracking
    err = zed.enable_positional_tracking()
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit()

    # Set runtime parameters
    runtime = sl.RuntimeParameters()

    while viewer.is_available():
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)

            if mapping_activated:
                mapping_state = zed.get_spatial_mapping_state()
                # Compute elapsed time since the last call of Camera.request_spatial_map_async()
                duration = time.time() - last_call
                # Ask for a mesh update if 500ms elapsed since last request
                if (duration > .5 and viewer.chunks_updated()):
                    zed.request_spatial_map_async()
                    last_call = time.time()

                if zed.get_spatial_map_request_status_async(
                ) == sl.ERROR_CODE.SUCCESS:
                    zed.retrieve_spatial_map_async(pymesh)
                    viewer.update_chunks()

            change_state = viewer.update_view(image, pose.pose_data(),
                                              tracking_state, mapping_state)

            if change_state:
                if not mapping_activated:
                    init_pose = sl.Transform()
                    zed.reset_positional_tracking(init_pose)

                    # Configure spatial mapping parameters
                    spatial_mapping_parameters.resolution_meter = sl.SpatialMappingParameters(
                    ).get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
                    spatial_mapping_parameters.use_chunk_only = True
                    spatial_mapping_parameters.save_texture = False  # Set to True to apply texture over the created mesh
                    spatial_mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.MESH

                    # Enable spatial mapping
                    zed.enable_spatial_mapping()

                    # Clear previous mesh data
                    pymesh.clear()
                    viewer.clear_current_mesh()

                    # Start timer
                    last_call = time.time()

                    mapping_activated = True
                else:
                    # Extract whole mesh
                    zed.extract_whole_spatial_map(pymesh)

                    filter_params = sl.MeshFilterParameters()
                    filter_params.set(sl.MESH_FILTER.MEDIUM)
                    # Filter the extracted mesh
                    pymesh.filter(filter_params, True)
                    viewer.clear_current_mesh()

                    # If textures have been saved during spatial mapping, apply them to the mesh
                    if (spatial_mapping_parameters.save_texture):
                        print("Save texture set to : {}".format(
                            spatial_mapping_parameters.save_texture))
                        pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)

                    # Save mesh as an obj file
                    filepath = "mesh_gen.obj"
                    status = pymesh.save(filepath)
                    if status:
                        print("Mesh saved under " + filepath)
                    else:
                        print("Failed to save the mesh under " + filepath)

                    mapping_state = sl.SPATIAL_MAPPING_STATE.NOT_ENABLED
                    mapping_activated = False

    image.free(memory_type=sl.MEM.CPU)
    pymesh.clear()
    # Disable modules and close camera
    zed.disable_spatial_mapping()
    zed.disable_positional_tracking()
    zed.close()
Example #18
def zedGrabber(zedFramesQueue):

    svo_path = "HD1080.svo"
    zed_id = 0
    init_mode = 0

    input_type = sl.InputType()
    if init_mode == 0:
        input_type.set_from_svo_file(svo_path)
    else:
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.camera_fps = 30
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP

    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = cam.enable_positional_tracking(tracking_parameters)

    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    zed_pose = sl.Pose()
    runtime = sl.RuntimeParameters()

    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Import the global variables. This lets us instance Darknet once.

    while True:

        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)

        if (err != sl.ERROR_CODE.SUCCESS):
            break

        cam.retrieve_image(mat, sl.VIEW.LEFT)
        image = mat.get_data()

        cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
        depth = point_cloud_mat.get_data()
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)

        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        camPosition = (tx, ty, tz)
        #print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}".format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))
        camOrientation = zed_pose.get_rotation_vector() * 180 / math.pi
        image = rotate(image, -camOrientation[1], center=None, scale=1.0)
        FPS = 1.0 / (time.time() - start_time)
        # Do the detection
        #rawDetections = detect(netMain, metaMain, image, thresh)
        #detectionsList = detectionsAnalayzer(rawDetections, depth)
        frame = zedFrame(image, depth, camOrientation, camPosition, FPS)
        zedFramesQueue.put(frame)
        #image = cvDrawBoxes(detectionsList, image)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #image = cv2.resize(image,(1280,720),interpolation=cv2.INTER_LINEAR)
    cam.close()
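
zedFrame, rotate and log come from elsewhere in this project. A hedged sketch of how the grabber might be driven (queue size and thread setup are assumptions):

from queue import Queue
from threading import Thread

zedFramesQueue = Queue(maxsize=4)
Thread(target=zedGrabber, args=(zedFramesQueue,), daemon=True).start()
frame = zedFramesQueue.get()  # blocks until the grabber publishes a zedFrame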
def main():
    # global stop_signal
    # signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # tracking enable
        py_transform = cams.transform_list[index]
        print("PositionalTrackingParameters start")
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        print("PositionalTrackingParameters end")
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        runtime = sl.RuntimeParameters()
        cams.runtime_list.append(runtime)
        index = index + 1

    #Start camera threads
    # for index in range(0, len(cams.zed_list)):
    #     if cams.zed_list[index].is_opened():
    #         thread_list.append(threading.Thread(target=grab_run, args=(cams,index,)))
    #         thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        fd_cam = f'{basePath}/{cams.name_list[index]}'
        os.makedirs(fd_cam, exist_ok=True)
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width // 2
        image_size.height = image_size.height // 2
        # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr, distortion = get_camera_intrintic_info(cams.zed_list[index])
        filename = f'{fd_cam}/camera-intrinsics.csv'
        np.savetxt(filename, cam_intr)
        filename = f'{fd_cam}/camera-distortion.csv'
        np.savetxt(filename, distortion)

    #*******************************************************************
    take_by_keyinput(cameras, cams)
    # take_by_keyinput_camera_view(cameras, cams)
    #*******************************************************************
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #20
    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD720,
        coordinate_units=sl.UNIT.METER,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)
    zed = sl.Camera()
    status = zed.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    tracking_params = sl.PositionalTrackingParameters()
    zed.enable_positional_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    camera_info = zed.get_camera_information()
    # Create OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(len(sys.argv), sys.argv, camera_info.camera_model)

    py_translation = sl.Translation()
    pose_data = sl.Transform()

    text_translation = ""
    text_rotation = ""

    while viewer.is_available():
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            tracking_state = zed.get_position(camera_pose)
Example #21
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()

    zed_sensors = sl.SensorsData()
    runtime_parameters = sl.RuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
            zed_imu = zed_sensors.get_imu_data()

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
                ax, ay, az))

            # Display the IMU angular velocity
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            zed_imu_pose = sl.Transform()
            imu_orientation = zed_imu.get_pose(zed_imu_pose).get_orientation().get()
            ox = round(imu_orientation[0], 3)
            oy = round(imu_orientation[1], 3)
            oz = round(imu_orientation[2], 3)
            ow = round(imu_orientation[3], 3)
            print("IMU Orientation: Ox: {0}, Oy: {1}, Oz: {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
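
A possible follow-up (not part of the original sample): converting the pose
quaternion printed above into Euler angles. A minimal sketch, assuming SciPy
is installed:

from scipy.spatial.transform import Rotation

def quaternion_to_euler_deg(ox, oy, oz, ow):
    # sl.Orientation.get() returns (x, y, z, w), the order SciPy expects
    return Rotation.from_quat([ox, oy, oz, ow]).as_euler('xyz', degrees=True)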
Example #22
def main():

    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()

    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()

    # Settings added on top of the original SVO sample
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # From the Positional Tracking sample:
    # Track the camera position until KeyboardInterrupt (Ctrl-C)
    #i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
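            # Note: request_mesh_async() only queues a mesh update each frame.
            # Its status could presumably be polled with
            # get_mesh_request_status_async() and fetched with
            # retrieve_mesh_async(); this sample instead extracts the whole
            # mesh once, on KeyboardInterrupt below.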
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()

            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            #print_mesh_information(pymesh, apply_texture)

            #save_filter(filter_params)
            #save_mesh(pymesh)
            cam.close()
            position_file.close()
            #save_position(path)
            save_all(filter_params, pymesh, path)
            print("\nFINISH")
            raise
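
save_all() is called above but never defined in this snippet. A minimal sketch
of what such a helper might look like (hypothetical; the file names are
illustrative, using Mesh.save and MeshFilterParameters.save from the ZED
Python API):

def save_all(filter_params, pymesh, path):
    # Hypothetical helper: persist the filter settings and the textured mesh;
    # the camera translations were already written to the CSV at `path`
    filter_params.save("mesh_filter.conf")
    pymesh.save("mesh.obj")
    print("Mesh saved; translations logged to {0}".format(path))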
Example #23
def main():
    print("Running Plane Detection sample ... Press 'q' to quit")

    # Create a camera object
    zed = sl.Camera()

    # Set configuration parameters
    init = sl.InitParameters()
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP  # OpenGL coordinate system

    # If applicable, use the SVO given as parameter
    # Otherwise use ZED live stream
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    # Open the camera
    status = zed.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    # Get camera info and check if IMU data is available
    camera_infos = zed.get_camera_information()
    has_imu = camera_infos.sensors_configuration.gyroscope_parameters.is_available

    # Initialize OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(
        camera_infos.camera_configuration.calibration_parameters.left_cam,
        has_imu)

    image = sl.Mat()  # current left image
    pose = sl.Pose()  # positional tracking data
    plane = sl.Plane()  # detected plane
    mesh = sl.Mesh()  # plane mesh

    find_plane_status = sl.ERROR_CODE.SUCCESS
    tracking_state = sl.POSITIONAL_TRACKING_STATE.OFF

    # Timestamp of the last mesh request
    last_call = time.time()

    user_action = gl.UserAction()
    user_action.clear()

    # Enable positional tracking before starting spatial mapping
    zed.enable_positional_tracking()

    runtime_parameters = sl.RuntimeParameters()
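    # Express 3D measures retrieved with retrieve_measure() in the world frame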
    runtime_parameters.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD

    while viewer.is_available():
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Update pose data (used for projection of the mesh over the current image)
            tracking_state = zed.get_position(pose)

            if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                # Compute elapsed time since the last plane detection call
                duration = time.time() - last_call
                # Ask for a mesh update on mouse click
                if user_action.hit:
                    # Convert the normalized click position to pixel coordinates
                    resolution = camera_infos.camera_configuration.camera_resolution
                    image_click = [
                        user_action.hit_coord[0] * resolution.width,
                        user_action.hit_coord[1] * resolution.height
                    ]
                    find_plane_status = zed.find_plane_at_hit(
                        image_click, plane)

                # On spacebar press, detect the floor plane if at least 500 ms
                # have elapsed since the last request
                if duration > .5 and user_action.press_space:
                    # Transform describing how tracking would be reset to the floor frame
                    reset_tracking_floor_frame = sl.Transform()
                    find_plane_status = zed.find_floor_plane(
                        plane, reset_tracking_floor_frame)
                    last_call = time.time()

                if find_plane_status == sl.ERROR_CODE.SUCCESS:
                    mesh = plane.extract_mesh()
                    viewer.update_mesh(mesh, plane.type)

            user_action = viewer.update_view(image, pose.pose_data(),
                                             tracking_state)

    viewer.exit()
    image.free(sl.MEM.CPU)
    mesh.clear()

    # Disable modules and close camera
    zed.disable_positional_tracking()
    zed.close()
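
Beyond its mesh, the detected plane exposes its geometry directly; a minimal
sketch of reading it (hedged, based on the sl.Plane accessors in the ZED
Python API):

# Hedged sketch: inspect a plane returned by find_floor_plane()/find_plane_at_hit()
normal = plane.get_normal()            # plane normal vector (3D)
center = plane.get_center()            # centroid of the detected plane
equation = plane.get_plane_equation()  # [a, b, c, d] with ax + by + cz = d
print("Plane normal: {0}, center: {1}".format(normal, center))

The fragment below comes from a separate object-detection / tracking-viewer
sample; its setup code (viewer, objects, resolutions, runtime parameters) is
not shown here.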
    track_view_generator = cv_viewer.TrackingViewer(
        tracks_resolution, camera_config.camera_fps,
        init_params.depth_maximum_distance)
    track_view_generator.set_camera_calibration(
        camera_config.calibration_parameters)
    image_track_ocv = np.zeros(
        (tracks_resolution.height, tracks_resolution.width, 4), np.uint8)

    # Will store the 2D image and tracklet views
    global_image = np.full(
        (display_resolution.height,
         display_resolution.width + tracks_resolution.width, 4),
        [245, 239, 239, 255], np.uint8)

    # Camera pose
    cam_w_pose = sl.Pose()
    cam_c_pose = sl.Pose()

    quit_app = False

    while viewer.is_available() and not quit_app:
        if zed.grab(runtime_params) == sl.ERROR_CODE.SUCCESS:
            # Retrieve objects
            returned_state = zed.retrieve_objects(objects, obj_runtime_param)

            if returned_state == sl.ERROR_CODE.SUCCESS and objects.is_new:
                # Retrieve point cloud
                zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA,
                                     sl.MEM.CPU, point_cloud_res)
                point_cloud.copy_to(point_cloud_render)
                # Retrieve image