Example #1
    def __init__(self):
        # Reference axes of the camera before any rotation is applied
        self.ORIGINAL_FORWARD = sl.Translation()
        self.ORIGINAL_FORWARD.init_vector(0, 0, 1)
        self.ORIGINAL_UP = sl.Translation()
        self.ORIGINAL_UP.init_vector(0, 1, 0)
        self.ORIGINAL_RIGHT = sl.Translation()
        self.ORIGINAL_RIGHT.init_vector(1, 0, 0)
        self.znear = 0.5
        self.zfar = 100.
        self.horizontalFOV = 70.
        self.orientation_ = sl.Orientation()
        self.position_ = sl.Translation()
        self.forward_ = sl.Translation()
        self.up_ = sl.Translation()
        self.right_ = sl.Translation()
        self.vertical_ = sl.Translation()
        self.vpMatrix_ = sl.Matrix4f()
        self.projection_ = sl.Matrix4f()
        self.projection_.set_identity()
        self.setProjection(1.78)

        # Place the camera above and behind the origin, then aim it
        self.position_.init_vector(0., 5., -3.)
        tmp = sl.Translation()
        tmp.init_vector(0, 0, -4)
        tmp2 = sl.Translation()
        tmp2.init_vector(0, 1, 0)
        self.setDirection(tmp, tmp2)
        # Tilt the view; angles are in degrees (the False flag means not radians)
        cam_rot = sl.Rotation()
        cam_rot.set_euler_angles(-50., 180., 0., False)
        self.setRotation(cam_rot)
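The constructor calls setProjection(), whose definition is not shown here. Below is a minimal sketch of what it plausibly computes, following the perspective-projection layout used in the Stereolabs OpenGL viewer samples; the (row, col) item assignment on sl.Matrix4f is an assumption carried over from those samples, not a confirmed API.

import math

    # (method of the same class as __init__ above)
    def setProjection(self, im_ratio):
        # Horizontal FOV in radians; the vertical FOV follows from the aspect ratio
        fov_x = self.horizontalFOV * math.pi / 180.
        fov_y = fov_x / im_ratio
        self.projection_.set_identity()
        self.projection_[(0, 0)] = 1. / math.tan(fov_x * .5)
        self.projection_[(1, 1)] = 1. / math.tan(fov_y * .5)
        self.projection_[(2, 2)] = -(self.zfar + self.znear) / (self.zfar - self.znear)
        self.projection_[(2, 3)] = -(2. * self.zfar * self.znear) / (self.zfar - self.znear)
        self.projection_[(3, 2)] = -1.
        self.projection_[(3, 3)] = 0.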
Example #2
    def __init__(self):
        self.ORIGINAL_FORWARD = sl.Translation()
        self.ORIGINAL_FORWARD.init_vector(0, 0, 1)
        self.ORIGINAL_UP = sl.Translation()
        self.ORIGINAL_UP.init_vector(0, 1, 0)
        self.ORIGINAL_RIGHT = sl.Translation()
        self.ORIGINAL_RIGHT.init_vector(1, 0, 0)
        self.znear = 0.5
        self.zfar = 100.
        self.horizontalFOV = 70.
        self.orientation_ = sl.Orientation()
        self.position_ = sl.Translation()
        self.forward_ = sl.Translation()
        self.up_ = sl.Translation()
        self.right_ = sl.Translation()
        self.vertical_ = sl.Translation()
        self.vpMatrix_ = sl.Matrix4f()
        self.offset_ = sl.Translation()
        self.offset_.init_vector(0, 0, 5)
        self.projection_ = sl.Matrix4f()
        self.projection_.set_identity()
        self.setProjection(1.78)

        self.position_.init_vector(0., 0., 0.)
        tmp = sl.Translation()
        tmp.init_vector(0, 0, -.1)
        tmp2 = sl.Translation()
        tmp2.init_vector(0, 1, 0)
        self.setDirection(tmp, tmp2)
Example #3
def get_zed_pose(zed, verbose=False):
    """

    Parameters
    ----------
    zed: sl.Camera
        The ZED camera object
    verbose: bool
        If true, will print the translation + orientation to the console

    Returns
    -------
    list
        Pose as [time, tx, ty, tz, ox, oy, oz, ow]
        time is given in seconds.
        The camera position is [tx, ty, tz]
        And orientation is the quaternion [ox, oy, oz, ow]

    """
    # Call zed.grab() each time before calling this function
    zed_pose = sl.Pose()

    # Get the pose of the camera relative to the world frame
    state = zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    # Display translation and timestamp
    py_translation = sl.Translation()
    tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
    ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
    tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
    time_nsec = zed_pose.timestamp.get_nanoseconds()
    time_sec = float(time_nsec) / 1e9
    if verbose:
        print(
            "Translation: tx: {0}, ty: {1}, tz: {2}, timestamp: {3}".format(
                tx, ty, tz, time_sec))
    # Display orientation quaternion
    py_orientation = sl.Orientation()
    ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
    oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
    oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
    ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
    if verbose:
        print("Orientation: ox: {0}, oy: {1}, oz: {2}, ow: {3}\n".format(
            ox, oy, oz, ow))
    return [time_sec, tx, ty, tz, ox, oy, oz, ow]
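A hedged usage sketch for get_zed_pose(): open the camera, enable positional tracking, grab a frame, then query the pose. Every call below appears elsewhere in these examples; only the wiring is illustrative.

zed = sl.Camera()
init_params = sl.InitParameters()
init_params.coordinate_units = sl.UNIT.METER
if zed.open(init_params) != sl.ERROR_CODE.SUCCESS:
    exit(1)
zed.enable_positional_tracking(sl.PositionalTrackingParameters())
runtime = sl.RuntimeParameters()
if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:  # grab before each pose query
    time_sec, tx, ty, tz, ox, oy, oz, ow = get_zed_pose(zed, verbose=True)
zed.close()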
Example #4
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a Transform object for the tracking parameters
    py_transform = sl.Transform()
    tracking_parameters = sl.PositionalTrackingParameters(init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()

    zed_sensors = sl.SensorsData()
    runtime_parameters = sl.RuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
            zed_imu = zed_sensors.get_imu_data()

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz: {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz: {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az: {2}\n".format(
                ax, ay, az))

            # Display the IMU angular velocity
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz: {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            zed_imu_pose = sl.Transform()
            ox = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], 3)
            oy = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], 3)
            oz = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], 3)
            ow = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], 3)
            print(
                "IMU Orientation: Ox: {0}, Oy: {1}, Oz: {2}, Ow: {3}\n".format(
                    ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
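For reference, the [ox, oy, oz, ow] quaternion printed above can be converted to roll/pitch/yaw with the standard formula. A plain-math sketch (results in radians), independent of the ZED API:

import math

def quat_to_euler(ox, oy, oz, ow):
    # Standard quaternion-to-Euler conversion, in radians
    roll = math.atan2(2. * (ow * ox + oy * oz), 1. - 2. * (ox * ox + oy * oy))
    pitch = math.asin(max(-1., min(1., 2. * (ow * oy - oz * ox))))
    yaw = math.atan2(2. * (ow * oz + ox * oy), 1. - 2. * (oy * oy + oz * oz))
    return roll, pitch, yaw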
Example #5
def main():
    # global stop_signal
    # signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # Enable positional tracking
        py_transform = cams.transform_list[index]
        print("PositionalTrackingParameters start")
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        print("PositionalTrackingParameters end")
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        runtime = sl.RuntimeParameters()
        cams.runtime_list.append(runtime)
        index = index + 1

    #Start camera threads
    # for index in range(0, len(cams.zed_list)):
    #     if cams.zed_list[index].is_opened():
    #         thread_list.append(threading.Thread(target=grab_run, args=(cams,index,)))
    #         thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        fd_cam = f'{basePath}/{cams.name_list[index]}'
        os.makedirs(fd_cam, exist_ok=True)
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2  # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr, distortion = get_camera_intrintic_info(cams.zed_list[index])
        filename = f'{fd_cam}/camera-intrinsics.csv'
        np.savetxt(filename, cam_intr)
        filename = f'{fd_cam}/camera-distortion.csv'
        np.savetxt(filename, distortion)

    #*******************************************************************
    take_by_keyinput(cameras, cams)
    # take_by_keyinput_camera_view(cameras, cams)
    #*******************************************************************
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #6
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for the tracking parameters
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    # added: log the pose data to a file
    path = '/media/nvidia/SD1/position.csv'
    position_file = open(path, 'w')

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("Translation: Tx: {0}, Ty: {1}, Tz: {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            position_file.write("Orientation: Ox: {0}, Oy: {1}, Oz: {2}, Ow: {3}\n".format(ox, oy, oz, ow))

            # Display the Rotation Matrix
            py_rotationMatrix = zed_pose.get_rotation_matrix()
            position_file.write("Got Rotation Matrix, but did not print\n")

            # Display the Rotation Vector
            py_rotationVector = zed_pose.get_rotation_vector()
            rx = round(py_rotationVector[0], 3)
            ry = round(py_rotationVector[1], 3)
            rz = round(py_rotationVector[2], 3)
            position_file.write("Rotation Vector: Rx: {0}, Ry: {1}, Rz: {2}, Timestamp: {3}\n".format(rx, ry, rz, zed_pose.timestamp))

            # Display the Euler Angles
            py_eulerAngles = zed_pose.get_euler_angles()
            ex = round(py_eulerAngles[0], 3)
            ey = round(py_eulerAngles[1], 3)
            ez = round(py_eulerAngles[2], 3)
            position_file.write("EulerAngles: EAx: {0}, EAy: {1}, EAz: {2}, Timestamp: {3}\n".format(ex, ey, ez, zed_pose.timestamp))

            i = i + 1

    # Close the camera
    zed.close()

    # Close file
    position_file.close()
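The rotation vector written above is an axis-angle encoding of the same rotation returned by get_rotation_matrix(). A small numpy sketch of the Rodrigues formula that relates the two, independent of the ZED API:

import numpy as np

def rodrigues(rvec):
    # Convert an axis-angle vector to a 3x3 rotation matrix
    theta = np.linalg.norm(rvec)
    if theta < 1e-12:
        return np.eye(3)
    k = np.asarray(rvec) / theta  # unit rotation axis
    K = np.array([[0., -k[2], k[1]],
                  [k[2], 0., -k[0]],
                  [-k[1], k[0], 0.]])  # skew-symmetric cross-product matrix
    return np.eye(3) + np.sin(theta) * K + (1. - np.cos(theta)) * (K @ K)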
Example #7
    def rotate(self, r):
        # Compose the incoming rotation with the current orientation
        tmp = sl.Orientation()
        tmp.init_rotation(r)
        self.orientation_ = tmp * self.orientation_
        self.updateVectors()
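A short usage sketch for rotate(); `camera` is a hypothetical instance of the class this method belongs to, and set_euler_angles()/init_rotation() are used exactly as in the surrounding examples:

rot = sl.Rotation()
rot.set_euler_angles(0., 10., 0., False)  # 10-degree yaw; False = degrees, not radians
camera.rotate(rot)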
Example #8
def main():
    imu = PX4Data()
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_VGA  # Use VGA video mode
    init_params.camera_fps = 120  # Set fps at 120
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP_X_FWD
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)


    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Capture 50 frames and stop
    i = 0
    image = sl.Mat()
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode
    prevTimeStamp = 0
    file = open('data/data.txt', 'w')
    key = 0
    depth = sl.Mat()
    point_cloud = sl.Mat()
    pcList = []
    while key != 113:
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
            timestamp = zed.get_timestamp(sl.TIME_REFERENCE.TIME_REFERENCE_CURRENT)  # Get the timestamp at the time the image was captured
            dt = (timestamp - prevTimeStamp) * 1.0 / 10 ** 9
            if dt > 0.03:
                # Get the pose of the left eye of the camera with reference to the world frame
                zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

                # Display the translation and timestamp
                py_translation = sl.Translation()
                gnd_pos = zed_pose.get_translation(py_translation).get()
                tx = round(gnd_pos[0], 3)
                ty = round(gnd_pos[1], 3)
                tz = round(gnd_pos[2], 3)
                print("Translation: Tx: {0}, Ty: {1}, Tz: {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

                # Display the orientation quaternion
                py_orientation = sl.Orientation()
                quat = zed_pose.get_orientation(py_orientation).get()
                ox = round(quat[0], 3)
                oy = round(quat[1], 3)
                oz = round(quat[2], 3)
                ow = round(quat[3], 3)
                print("Orientation: Ox: {0}, Oy: {1}, Oz: {2}, Ow: {3}\n".format(ox, oy, oz, ow))

                zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
                img = image.get_data()
                cv2.imwrite('data/images/' + str(timestamp) + '.png', img)

                zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
                print(point_cloud.get_data().shape)
                pc = np.reshape(point_cloud.get_data(), (1, 376, 672, 4))
                pcList.append(pc)

                cv2.imshow("ZED", img)
                key = cv2.waitKey(1)

                prevTimeStamp = timestamp
                print(dt)
                print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(), timestamp))

                file.write('%d '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f \n' % (timestamp, tx, ty, tz, ox, oy, oz, ow,
                                                       imu.acc.x, imu.acc.y, imu.acc.z,
                                                       imu.gyr.x, imu.gyr.y, imu.gyr.z,
                                                       imu.gps.x, imu.gps.y, imu.gps.z,
                                                       imu.vel.x, imu.vel.y, imu.vel.z,
                                                       imu.quat.x, imu.quat.y, imu.quat.z, imu.quat.w))
                i = i + 1

    # Save the accumulated point clouds, then close the camera and files
    pc = np.concatenate(pcList, axis=0)
    np.save('pc', pc)
    zed.close()
    file.close()
    imu.close()
    exit()
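A hedged companion for reading the space-separated log written above back into numpy columns; the slice boundaries simply mirror the write format:

import numpy as np

data = np.loadtxt('data/data.txt')
timestamps = data[:, 0]
positions = data[:, 1:4]       # tx, ty, tz
orientations = data[:, 4:8]    # ox, oy, oz, ow
imu_acc = data[:, 8:11]        # imu.acc.x/y/z
imu_gyr = data[:, 11:14]       # imu.gyr.x/y/z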

Example #9
tracking_params = sl.PositionalTrackingParameters(_enable_pose_smoothing=False,
                                                  _set_floor_as_origin=False,
                                                  _enable_imu_fusion=False)
tracking_params.area_file_path = "nsh_chair.area"  # alternative: "smith.area"
zed.enable_positional_tracking(tracking_params)

runtime = sl.RuntimeParameters()
camera_pose = sl.Pose()

camera_info = zed.get_camera_information()

py_translation = sl.Translation()
py_orientation = sl.Orientation()
pose_data = sl.Transform()
sensors_data = sl.SensorsData()

text_translation = ""

ui = HSplit(
    VSplit(
        Log("Timestamp", color=3, border_color=5),
        Log("OLA Data", color=3, border_color=5),
    ),
    VSplit(
        HGauge(val=0, title="tracker accuracy", border_color=5),
        HGauge(val=0, title="mapper accuracy", border_color=5),
    ))
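A plausible update loop for the dashing UI above (hedged sketch): grab, query the pose, and push text into the two Log tiles. Tile access through ui.items and the append()/display() calls follow the dashing README; treat them as assumptions rather than a confirmed API.

while zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
    state = zed.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
    if state == sl.POSITIONAL_TRACKING_STATE.OK:
        translation = camera_pose.get_translation(py_translation)
        text_translation = str((round(translation.get()[0], 3),
                                round(translation.get()[1], 3),
                                round(translation.get()[2], 3)))
        ui.items[0].items[0].append(str(camera_pose.timestamp.get_milliseconds()))
        ui.items[0].items[1].append(text_translation)
    ui.display()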
Example #10
def main():
    global stop_signal
    thread_list = []
    signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # Enable positional tracking
        py_transform = cams.transform_list[index]
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        index = index + 1

    #Start camera threads
    for index in range(0, len(cams.zed_list)):
        if cams.zed_list[index].is_opened():
            thread_list.append(
                threading.Thread(target=grab_run, args=(
                    cams,
                    index,
                )))
            thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2  # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr = get_camera_intrintic_info(cams.zed_list[index])
        filename = path + cams.name_list[index] + "-camera-intrinsics.txt"
        df = pd.DataFrame(cam_intr)
        df.to_csv(filename, sep=' ', header=None, index=None)

    #*******************************************************************

    index = 0
    take_cam_id = 0
    for cam in cameras:
        get_cam_data(cams, index, take_cam_id)
        index += 1

    stop_signal = True
    #*******************************************************************
    '''
    key = ' '
    take_cam_id=0
    while key != 113 :

        index=0
        image_ocv_cat=None
        depth_image_ocv_cat=None
        for cam in cameras:
            image_ocv, depth_image_ocv=get_cam_color_depth(cams,index)
            if image_ocv_cat is None:
                image_ocv_cat=image_ocv
                depth_image_ocv_cat=depth_image_ocv
            else:
                image_ocv_cat=np.hstack([image_ocv_cat,image_ocv])
                depth_image_ocv_cat=np.hstack([depth_image_ocv_cat,depth_image_ocv])
            index+=1

        cv2.imshow("Image", image_ocv_cat)
        cv2.imshow("Depth", depth_image_ocv_cat)

        key = cv2.waitKey(10)
        if key == 114 or  key == 82:#R
            index = 0
            for cam in cameras:
                # process_key_event(cams,key,index)
                get_cam_data(cams, index,take_cam_id)
                index+=1
            take_cam_id=take_cam_id+1

    cv2.destroyAllWindows()
    '''
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #11
def capture_thread_func():
    global image_np_global, depth_np_global, exit_signal, new_data

    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP
    init_params.camera_fps = 60
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.coordinate_units = sl.UNIT.UNIT_METER
    init_params.svo_real_time_mode = False

    # Open the camera
    err = zed.open(init_params)

    tracking_parameters = sl.TrackingParameters(sl.Transform())
    tracking_parameters.enable_spatial_memory = True
    err = zed.enable_tracking(tracking_parameters)

    while err != sl.ERROR_CODE.SUCCESS:
        err = zed.open(init_params)
        print(err)
        sleep(1)

    image_mat = sl.Mat()
    depth_mat = sl.Mat()
    zed_pose = sl.Pose()  # pose object reused by get_position() below
    runtime_parameters = sl.RuntimeParameters()

    while not exit_signal:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_image(image_mat,
                               sl.VIEW.VIEW_LEFT,
                               width=width,
                               height=height)
            zed.retrieve_measure(depth_mat,
                                 sl.MEASURE.MEASURE_XYZRGBA,
                                 width=width,
                                 height=height)

            lock.acquire()
            image_np_global = load_image_into_numpy_array(image_mat)
            depth_np_global = load_depth_into_numpy_array(depth_mat)
            new_data = True
            lock.release()

            # For spatial tracking
            tracking_state = zed.get_position(
                zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

            if tracking_state == sl.TRACKING_STATE.TRACKING_STATE_OK:
                # Getting spatial position
                py_translation = sl.Translation()
                tx = round(
                    zed_pose.get_translation(py_translation).get()[0], 2)
                ty = round(
                    zed_pose.get_translation(py_translation).get()[1], 2)
                tz = round(
                    zed_pose.get_translation(py_translation).get()[2], 2)

                # Getting spatial orientation
                py_orientation = sl.Orientation()
                ox = round(
                    zed_pose.get_orientation(py_orientation).get()[0], 2)
                oy = round(
                    zed_pose.get_orientation(py_orientation).get()[1], 2)
                oz = round(
                    zed_pose.get_orientation(py_orientation).get()[2], 2)
                ow = round(
                    zed_pose.get_orientation(py_orientation).get()[3], 2)

                # Getting the rotation vector (axis-angle), converted to degrees below
                rotation = zed_pose.get_rotation_vector()
                rx = round(rotation[0], 2)
                ry = round(rotation[1], 2)
                rz = round(rotation[2], 2)

                rx *= (180 / math.pi)
                ry *= (180 / math.pi)
                rz *= (180 / math.pi)

                rx = round(rx, 2)
                ry = round(ry, 2)
                rz = round(rz, 2)

                # Storing some position data
                voi.coord = [tx, ty, tz]
                voi.rotation = [rx, ry, rz]
                voi.orientation = [ox, oy, oz, ow]

        sleep(0.01)

    zed.close()
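The helpers load_image_into_numpy_array() and load_depth_into_numpy_array() are not defined in this example. A minimal sketch, assuming they just copy the sl.Mat buffer (get_data() is used the same way in Example #8):

import numpy as np

def load_image_into_numpy_array(image_mat):
    # Copy so the sl.Mat can be safely reused by the next grab
    return np.copy(image_mat.get_data())

def load_depth_into_numpy_array(depth_mat):
    return np.copy(depth_mat.get_data())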