Example #1
    def __init__(self):
        self.ORIGINAL_FORWARD = sl.Translation()
        self.ORIGINAL_FORWARD.init_vector(0, 0, 1)
        self.ORIGINAL_UP = sl.Translation()
        self.ORIGINAL_UP.init_vector(0, 1, 0)
        self.ORIGINAL_RIGHT = sl.Translation()
        self.ORIGINAL_RIGHT.init_vector(1, 0, 0)
        self.znear = 0.5
        self.zfar = 100.
        self.horizontalFOV = 70.
        self.orientation_ = sl.Orientation()
        self.position_ = sl.Translation()
        self.forward_ = sl.Translation()
        self.up_ = sl.Translation()
        self.right_ = sl.Translation()
        self.vertical_ = sl.Translation()
        self.vpMatrix_ = sl.Matrix4f()
        self.projection_ = sl.Matrix4f()
        self.projection_.set_identity()
        self.setProjection(1.78)

        self.position_.init_vector(0., 5., -3.)
        tmp = sl.Translation()
        tmp.init_vector(0, 0, -4)
        tmp2 = sl.Translation()
        tmp2.init_vector(0, 1, 0)
        self.setDirection(tmp, tmp2)
        cam_rot = sl.Rotation()
        cam_rot.set_euler_angles(-50., 180., 0., False)
        self.setRotation(cam_rot)
Example #2
    def update(self):
        if(self.mouse_button[0]):
            r = sl.Rotation()
            r.init_angle_translation(self.mouseMotion[0] * 0.002, self.camera.vertical_)
            self.camera.rotate(r)

            r.init_angle_translation(self.mouseMotion[1] * 0.002, self.camera.right_)
            self.camera.rotate(r)
        
        if(self.mouse_button[1]):
            t = sl.Translation()
            tmp = self.camera.right_.get()
            scale = self.mouseMotion[0] *-0.01
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)
            
            tmp = self.camera.up_.get()
            scale = self.mouseMotion[1] * 0.01
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)

        if (self.wheelPosition != 0):          
            t = sl.Translation()  
            tmp = self.camera.forward_.get()
            scale = self.wheelPosition * -0.065
            t.init_vector(tmp[0] * scale, tmp[1] * scale, tmp[2] * scale)
            self.camera.translate(t)

        self.camera.update()

        self.mouseMotion = [0., 0.]
        self.wheelPosition = 0
Example #3
def main():

    init = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720,
                                 depth_mode=sl.DEPTH_MODE.PERFORMANCE,
                                 coordinate_units=sl.UNIT.METER,
                                 coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP,
                                 sdk_verbose=True)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    transform = sl.Transform()
    tracking_params = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = sl.Translation()

    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
Example #4
def get_pos_dt(cams, index, cnt_r=6):

    zed, zed_pose, zed_sensors = cams.zed_list[index], cams.pose_list[
        index], cams.zed_sensors_list[index]

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py
    zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    # zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
    # zed_imu = zed_sensors.get_imu_data()  # Display the translation and timestamp
    py_translation = cams.py_translation_list[index]
    py_orientation = cams.py_orientation_list[index]
    # py_orientation = sl.Orientation()
    tx = round(zed_pose.get_translation(py_translation).get()[0], cnt_r)
    ty = round(zed_pose.get_translation(py_translation).get()[1], cnt_r)
    tz = round(zed_pose.get_translation(py_translation).get()[2], cnt_r)
    # print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

    # Display the orientation quaternion

    ox = round(zed_pose.get_orientation(py_orientation).get()[0], cnt_r)
    oy = round(zed_pose.get_orientation(py_orientation).get()[1], cnt_r)
    oz = round(zed_pose.get_orientation(py_orientation).get()[2], cnt_r)
    ow = round(zed_pose.get_orientation(py_orientation).get()[3], cnt_r)
    pose_lst = [tx, ty, tz, ow, ox, oy, oz]
    # get camera transform data
    R = zed_pose.get_rotation_matrix(sl.Rotation()).r.T / 1000
    t = zed_pose.get_translation(sl.Translation()).get()
    world2cam = np.hstack((R, np.dot(-R, t).reshape(3, -1)))
    K = get_camera_intrintic_info(zed)
    P = np.dot(K, world2cam)
    transform = np.vstack((P, [0, 0, 0, 1]))
    return pose_lst, transform
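A minimal usage sketch for get_pos_dt above. The returned 4x4 transform stacks P = K·[R | -R·t] on top of [0, 0, 0, 1], so it maps a homogeneous world point to homogeneous pixel coordinates; the sample point below is illustrative only.

import numpy as np

pose_lst, transform = get_pos_dt(cams, 0)
world_pt = np.array([0.0, 0.0, 1.0, 1.0])  # homogeneous world point (illustrative)
u, v, w, _ = transform @ world_pt
print(u / w, v / w)  # pixel coordinates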
Example #5
    def to_cv_point(self, x, z):
        out = []
        if isinstance(x, float) and isinstance(z, float):
            out = [
                int((x - self.x_min) / self.x_step),
                int((z - self.z_min) / self.z_step)
            ]
        elif isinstance(x, list) and isinstance(z, sl.Pose):
            # Go to camera current pose
            rotation = z.get_rotation_matrix()
            rotation.inverse()
            tmp = x - (z.get_translation() * rotation.get_orientation()).get()
            new_position = sl.Translation()
            new_position.init_vector(tmp[0], tmp[1], tmp[2])
            out = [
                int(((new_position.get()[0] - self.x_min) / self.x_step) + 0.5),
                int(((new_position.get()[2] - self.z_min) / self.z_step) + 0.5)
            ]
        elif isinstance(x, TrackPoint) and isinstance(z, sl.Pose):
            pos = x.get_xyz()
            out = self.to_cv_point(pos, z)
        else:
            print("Unhandled argument type")
        return out
Example #6
def zed_init():
    '''
    ## Author: Zuo Jiale
    ## Date: 2020-08-01
    ## Purpose: Initialize the ZED
    ## Input params: none
    ## Returns: err
    '''
    camera_settings = sl.VIDEO_SETTINGS.BRIGHTNESS
    str_camera_settings = "BRIGHTNESS"
    step_camera_settings = 1
    print("3.Detected the ZED...")
    global cam
    cam = sl.Camera()
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.UNIT.METER  # Use meter units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.HD720
    err = cam.open(init_params)  # if the ZED fails to open, err will not be SUCCESS
    global runtime_parameters
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode
    runtime_parameters.confidence_threshold = 100
    runtime_parameters.textureness_confidence_threshold = 100
    global mat
    mat = sl.Mat()
    global depth
    depth = sl.Mat()
    global point_cloud
    point_cloud = sl.Mat()
    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
    tr_np = mirror_ref.m
    return err
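A minimal calling sketch for zed_init() above, mirroring the error check used in the other examples on this page (pyzed.sl imported as sl is assumed):

import pyzed.sl as sl

err = zed_init()
if err != sl.ERROR_CODE.SUCCESS:
    print(repr(err))
    exit(1)
# cam, runtime_parameters, mat, depth and point_cloud are now available as globals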
Example #7
    def generate_view(self, objects, current_camera_pose, tracking_view,
                      tracking_enabled):
        # To get position in WORLD reference
        for obj in objects.object_list:
            pos = obj.position
            tmp_pos = sl.Translation()
            tmp_pos.init_vector(pos[0], pos[1], pos[2])
            new_pos = (tmp_pos * current_camera_pose.get_orientation()
                       ).get() + current_camera_pose.get_translation().get()
            obj.position = np.array([new_pos[0], new_pos[1], new_pos[2]])

        # Initialize visualisation
        if (not self.has_background_ready):
            self.generate_background()

        np.copyto(tracking_view, self.background, 'no')

        if (tracking_enabled):
            # First add new points and remove the ones that are too old
            current_timestamp = objects.timestamp.get_seconds()
            self.add_to_tracklets(objects, current_timestamp)
            self.prune_old_points(current_timestamp)

            # Draw all tracklets
            self.draw_tracklets(tracking_view, current_camera_pose)
        else:
            self.draw_points(objects.object_list, tracking_view,
                             current_camera_pose)
Example #8
def DataAcquisiton(zed, zed_pose, runtime_parameters):

    while True:

        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:

            #time1 = time.time()

            zed.get_position(
                zed_pose,
                sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)  # Updating Pose

            # Translation
            translation = sl.Translation()
            tx = round(zed_pose.get_translation(translation).get()[0], 3)
            ty = round(zed_pose.get_translation(translation).get()[1], 3)
            tz = round(zed_pose.get_translation(translation).get()[2], 3)

            # Rotation
            rotation = zed_pose.get_rotation_vector()
            rx = round(rotation[0], 2)
            ry = round(rotation[1], 2)
            rz = round(rotation[2], 2)

            with open('/media/ctuxavier/ADATASE730H/zed_recording.csv',
                      'a') as csv_file:
                writer = csv.writer(csv_file)
                writer.writerow([ty, tx, tz, rx, ry, rz, time.time()])
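A sketch of reading back the rows written above. Note the column order is ty, tx, tz, rx, ry, rz, timestamp, exactly as the writer emits it; the path is the example's own.

import csv

with open('/media/ctuxavier/ADATASE730H/zed_recording.csv') as csv_file:
    for ty, tx, tz, rx, ry, rz, t in csv.reader(csv_file):
        print(float(tx), float(ty), float(tz))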
Example #9
def init_params(menu, cam_id=0):
    global save_dir
    zed = EasyDict({})
    zed.cam = sl.Camera()
    zed.mat = EasyDict({
        'pose': sl.Pose(),
        'translation': sl.Translation(),
        'transform': sl.Transform(),
        'image': sl.Mat(),  # image_map
        'depth': sl.Mat(),  # depth_map
        'point_cloud': sl.Mat(),
        'sensors': sl.SensorsData()  # sensors_data
    })

    zed.param = EasyDict({
        'init':
        sl.InitParameters(
            camera_resolution=mode.resolution[menu.cam.resolution],
            depth_mode=mode.depth[menu.mode.depth],
            coordinate_units=mode.unit[menu.unit],
            coordinate_system=mode.coordinate_system[menu.coordinate_system],
            depth_minimum_distance=menu.depth_range.min,
            depth_maximum_distance=menu.depth_range.max,
            sdk_verbose=verbose),
        'runtime':
        sl.RuntimeParameters(sensing_mode=mode.sensing[menu.mode.sensing]),
        'tracking':
        sl.PositionalTrackingParameters(zed.mat.transform)
    })

    #######
    zed.param.init.set_from_camera_id(cam_id)
    save_dir = save_dir_fmt.format(cam_id)
    return zed
Example #10
def get_imu_pose(cams, index, cnt_r=10):
    err = cams.zed_list[index].grab(cams.runtime_list[index])
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
    zed = cams.zed_list[index]
    zed_pose = cams.pose_list[index]
    zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    py_translation = sl.Translation()  #cams.py_translation_list[index]

    tx = round(zed_pose.get_translation(py_translation).get()[0], cnt_r)
    ty = round(zed_pose.get_translation(py_translation).get()[1], cnt_r)
    tz = round(zed_pose.get_translation(py_translation).get()[2], cnt_r)

    zed_sensors = cams.zed_sensors_list[index]
    zed_imu = zed_sensors.get_imu_data()
    zed_imu_pose = cams.transform_list[index]  #sl.Transform()
    ox = round(
        zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], cnt_r)
    oy = round(
        zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], cnt_r)
    oz = round(
        zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], cnt_r)
    ow = round(
        zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], cnt_r)
    imu_pose_lst = [tx, ty, tz, ow, ox, oy, oz]
    return imu_pose_lst
Example #11
def main():
    # Create a Camera object
    zed = sl.Camera()

    #init
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.UNIT.MILLIMETER  # Use millimeter units (for depth measurements)

    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode

    # Capture 50 images and depth, then stop
    i = 0
    image = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()

    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
    tr_np = mirror_ref.m

    while i < 50:
        # A new image is available if grab() returns SUCCESS
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            print("width: ", x)
            print("height: ", y)
            err, point_cloud_value = point_cloud.get_value(x, y)

            #distance formula
            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            point_cloud_np.dot(tr_np)

            if not np.isnan(distance) and not np.isinf(distance):
                distance = round(distance)
                print("Distance to Camera at ({0}, {1}): {2} mm\n".format(
                    x, y, distance))
                # Increment the loop
                i = i + 1
            else:
                print("invalid\n")
            sys.stdout.flush()

    zed.close()
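The Euclidean distance computed with math.sqrt above recurs throughout these examples; an equivalent one-liner with NumPy, assuming point_cloud_value as returned by point_cloud.get_value (an x, y, z, rgba array):

import numpy as np

distance = float(np.linalg.norm(point_cloud_value[:3]))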
Example #12
def cal():
    zed = sl.Camera()
    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.UNIT.METER  # Use meter units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.HD720

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Create and set RuntimeParameters after opening the camera
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode
    # Setting the depth confidence parameters
    runtime_parameters.confidence_threshold = 100
    runtime_parameters.textureness_confidence_threshold = 100

    # Grab frames until a valid distance is measured, then stop
    image = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()

    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75,4.0,0))
    tr_np = mirror_ref.m

    while True:
        # A new image is available if grab() returns SUCCESS
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            
            zed.retrieve_image(image, sl.VIEW.LEFT)
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            point_cloud_np.dot(tr_np)

            if not np.isnan(distance) and not np.isinf(distance):
                print("Distance to Camera at ({}, {}) (image center): {:1.3} m".format(x, y, distance), end="\r")
                zed.close()
                break
            else:
                print("Can't estimate distance at this position.")
                print("Your camera is probably too close to the scene, please move it backwards.\n")
            sys.stdout.flush()
Example #13
    def update(self): 
        dot_ = sl.Translation.dot_translation(self.vertical_, self.up_)
        if(dot_ < 0.):
            tmp = self.vertical_.get()
            self.vertical_.init_vector(tmp[0] * -1.,tmp[1] * -1., tmp[2] * -1.)
        transformation = sl.Transform()

        tmp_position = self.position_.get()
        tmp = (self.offset_ * self.orientation_).get()
        new_position = sl.Translation()
        new_position.init_vector(tmp_position[0] + tmp[0], tmp_position[1] + tmp[1], tmp_position[2] + tmp[2])
        transformation.init_orientation_translation(self.orientation_, new_position)
        transformation.inverse()
        self.vpMatrix_ = self.projection_ * transformation
Example #14
def get_zed_pose(zed, verbose=False):
    """

    Parameters
    ----------
    zed: sl.Camera
        The ZED camera object
    verbose: bool
        If true, will print the translation + orientation to the console

    Returns
    -------
    list
        Pose as [time, tx, ty, tz, ox, oy, oz, ow]
        time is given in seconds.
        The camera position is [tx, ty, tz]
        And orientation is the quaternion [ox, oy, oz, ow]

    """
    # Call zed.grab() each time before calling this function
    zed_pose = sl.Pose()

    # Get the pose of the camera relative to the world frame
    state = zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
    # Display translation and timestamp
    py_translation = sl.Translation()
    tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
    ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
    tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
    time_nsec = zed_pose.timestamp.get_nanoseconds()
    time_sec = float(time_nsec) / 1e9
    if verbose:
        print(
            "Translation: tx: {0}, ty:  {1}, tz:  {2}, timestamp: {3}".format(
                tx, ty, tz, time_sec))
    # Display orientation quaternion
    py_orientation = sl.Orientation()
    ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
    oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
    oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
    ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
    if verbose:
        print("Orientation: ox: {0}, oy:  {1}, oz: {2}, ow: {3}\n".format(
            ox, oy, oz, ow))
    return [time_sec, tx, ty, tz, ox, oy, oz, ow]
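A minimal calling sketch following the docstring above: zed.grab() must succeed before each call, and the setup mirrors the other examples on this page.

import pyzed.sl as sl

zed = sl.Camera()
if zed.open(sl.InitParameters()) == sl.ERROR_CODE.SUCCESS:
    zed.enable_positional_tracking(sl.PositionalTrackingParameters())
    if zed.grab(sl.RuntimeParameters()) == sl.ERROR_CODE.SUCCESS:
        time_sec, tx, ty, tz, ox, oy, oz, ow = get_zed_pose(zed, verbose=True)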
Example #15
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    #added! 
    path = '/media/nvidia/SD1/position.csv'
    position_file = open(path,'w')
    
    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            zed.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            position_file.write("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

            # Display the Rotation Matrix
            py_rotationMatrix = zed_pose.get_rotation_matrix()
            position_file.write("Got Rotation Matrix, but did not print\n")

            # Display the Rotation Vector
            py_rotationVector = zed_pose.get_rotation_vector()
            rx = round(py_rotationVector[0], 3)
            ry = round(py_rotationVector[1], 3)
            rz = round(py_rotationVector[2], 3)
            position_file.write("Rotation Vector: Rx: {0}, Ry: {1}, Rz {2}, Timestamp: {3}\n".format(rx, ry, rz, zed_pose.timestamp))

            # Display the Euler Angles
            py_eulerAngles = zed_pose.get_euler_angles()
            ex = round(py_eulerAngles[0], 3)
            ey = round(py_eulerAngles[1], 3)
            ez = round(py_eulerAngles[2], 3)
            position_file.write("EulerAngles: EAx: {0}, EAy: {1}, EAz {2}, Timestamp: {3}\n".format(ex, ey, ez, zed_pose.timestamp))



            
            i = i + 1

    # Close the camera
    zed.close()

    # Close file
    position_file.close()
Example #16
    def __init__(self):
        self.ORIGINAL_FORWARD = sl.Translation()
        self.ORIGINAL_FORWARD.init_vector(0,0,1)
        self.ORIGINAL_UP = sl.Translation()
        self.ORIGINAL_UP.init_vector(0,1,0)
        self.ORIGINAL_RIGHT = sl.Translation()
        self.ORIGINAL_RIGHT.init_vector(1,0,0)
        self.znear = 0.5
        self.zfar = 100.
        self.horizontalFOV = 70.
        self.orientation_ = sl.Orientation()
        self.position_ = sl.Translation()
        self.forward_ = sl.Translation()
        self.up_ = sl.Translation()
        self.right_ = sl.Translation()
        self.vertical_ = sl.Translation()
        self.vpMatrix_ = sl.Matrix4f()
        self.offset_ = sl.Translation()
        self.offset_.init_vector(0,0,5)
        self.projection_ = sl.Matrix4f()
        self.projection_.set_identity()
        self.setProjection(1.78)

        self.position_.init_vector(0., 0., 0.)
        tmp = sl.Translation()
        tmp.init_vector(0, 0, -.1)
        tmp2 = sl.Translation()
        tmp2.init_vector(0, 1, 0)
        self.setDirection(tmp, tmp2)        
Example #17
def main():

    #if len(sys.argv) != 2:
    #    print("Please specify path to .svo file.")
    #    exit()

    #filepath = sys.argv[1]
    #print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    #init = sl.InitParameters(svo_input_filename=filepath)
    init = sl.InitParameters()

    #new
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # from Positional Tracking:
    # Track the camera position until Keyboard Interrupt (ctrl-C)
    #i = 0
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")
    #for i in range(200):
    while True:
        try:
            cam.grab(runtime)
            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            #position_file.write("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()

            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            #print_mesh_information(pymesh, apply_texture)

            #save_filter(filter_params)
            #save_mesh(pymesh)
            cam.close()
            position_file.close()
            #save_position(path)
            save_all(filter_params, pymesh, path)
            print("\nFINISH")
            raise
Example #18
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()

    zed_sensors = sl.SensorsData()
    runtime_parameters = sl.RuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
            zed_imu = zed_sensors.get_imu_data()

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
                ax, ay, az))

            #Display the IMU angular velocity
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            zed_imu_pose = sl.Transform()
            ox = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], 3)
            oy = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], 3)
            oz = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], 3)
            ow = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], 3)
            print(
                "IMU Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                    ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
Example #19
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
        
    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking=True
    obj_param.image_sync=True
    obj_param.enable_mask_output=True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking :
        positional_tracking_param = sl.PositionalTrackingParameters()
        #positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS :
        print (repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40
    
    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        start = timeit.default_timer()
        if objects.is_new :
            obj_array = objects.object_list
            if len(obj_array) > 0 :
                first_object = obj_array[0]
                print("First object attributes:")
                print(" Label '"+repr(first_object.label)+"' (conf. "+str(int(first_object.confidence))+"/100)")
                position = first_object.position
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n 3D dimentions: [{3},{4},{5}]".format(position[0],position[1],position[2],dimensions[0],dimensions[1],dimensions[2]))

                ######################
                image = sl.Mat()
                depth = sl.Mat()
                point_cloud = sl.Mat()
                mirror_ref = sl.Transform()
                mirror_ref.set_translation(sl.Translation(2.75,4.0,0))
                tr_np = mirror_ref.m
                zed.retrieve_image(image, sl.VIEW.LEFT)
                # Retrieve depth map. Depth is aligned on the left image
                zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
                x = round(image.get_width() / 2)
                y = round(image.get_height() / 2)
                err, point_cloud_value = point_cloud.get_value(x, y)

                distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

                point_cloud_np = point_cloud.get_data()
                point_cloud_np.dot(tr_np)
                if not np.isnan(distance) and not np.isinf(distance):
                    print("Distance to Camera at ({}, {}) (image center): {:1.3} m".format(x, y, distance), end="\r")
                else:
                    pass
                
                print("\n Bounding Box 3D ")
                bounding_box = first_object.bounding_box
                stop = timeit.default_timer()
                print("\n FPS:", stop - start)


    # Close the camera
    zed.close()
Example #20
def capture(imagePath):
    #Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_ULTRA  # Use ULTRA depth mode
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Use meter units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720

    #Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        print("[DECON] Error: Unable to open the camera")
        zed.close()
        exit(-1)

    try:
        image = sl.Mat()
        depth_map = sl.Mat()
        point_cloud = sl.Mat()
        mirror_ref = sl.Transform()
        mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
        tr_np = mirror_ref.m
        runtime_parameters = sl.RuntimeParameters()
        runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode
        # Setting the depth confidence parameters
        # runtime_parameters.confidence_threshold = 100
        # runtime_parameters.textureness_confidence_threshold = 100
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # A new image and depth is available if grab() returns SUCCESS
            zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)  # Retrieve left image
            img = image.get_data()
            im = Image.fromarray(img).rotate(180)
            # Retrieve depth map. Depth is aligned on the left image
            zed.retrieve_measure(depth_map, sl.MEASURE.MEASURE_DEPTH)
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            point_cloud_np.dot(tr_np)

            if not np.isnan(distance) and not np.isinf(distance):
                #print("Distance to camera at (" + str(x) + ", " + str(y) + ") (image center): " + str(distance))
                #print("Image size is: ",im.size)
                (w, h) = im.size
                k = 2 * distance * tan(radians(30)) / h
                s = 2 * distance * tan(radians(45)) / w
                print(w, h, distance, s, k)
                #print("Vertical m/pixel:",str(k))
                #print("Horizontal m/pixel:",str(s))
            else:
                print("Can't estimate distance at this position.")
                print(
                    "Your camera is probably too close to the scene, please move it backwards.\n"
                )
                torch.cuda.empty_cache()
                zed.close()
                exit(-1)
            sys.stdout.flush()
            im.save(imagePath)
            del im
    except Exception as e:
        print("[DECON] Error: Exception occured")
        print(e)
    finally:
        torch.cuda.empty_cache()
        zed.close()
Example #21
def zedGrabber(zedFramesQueue):

    svo_path = "HD1080.svo"
    zed_id = 0
    init_mode = 0

    input_type = sl.InputType()
    if init_mode == 0:
        input_type.set_from_svo_file(svo_path)
    else:
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.camera_fps = 30
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP

    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    # Enable positional tracking with default parameters
    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = cam.enable_positional_tracking(tracking_parameters)

    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    zed_pose = sl.Pose()
    runtime = sl.RuntimeParameters()

    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Import the global variables. This lets us instance Darknet once.

    while True:

        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)

        if (err != sl.ERROR_CODE.SUCCESS):
            break

        cam.retrieve_image(mat, sl.VIEW.LEFT)
        image = mat.get_data()

        cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
        depth = point_cloud_mat.get_data()
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)

        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        camPosition = (tx, ty, tz)
        #print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}".format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))
        camOrientation = zed_pose.get_rotation_vector() * 180 / math.pi
        image = rotate(image, -camOrientation[1], center=None, scale=1.0)
        FPS = 1.0 / (time.time() - start_time)
        # Do the detection
        #rawDetections = detect(netMain, metaMain, image, thresh)
        #detectionsList = detectionsAnalayzer(rawDetections, depth)
        frame = zedFrame(image, depth, camOrientation, camPosition, FPS)
        zedFramesQueue.put(frame)
        #image = cvDrawBoxes(detectionsList, image)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #image = cv2.resize(image,(1280,720),interpolation=cv2.INTER_LINEAR)
    cam.close()
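A sketch of a consumer for zedFramesQueue. zedFrame is defined elsewhere in the source project, so the attribute names below are assumptions taken from the constructor call above, and queue.Queue is assumed for the queue type.

import queue
import threading

zedFramesQueue = queue.Queue()
threading.Thread(target=zedGrabber, args=(zedFramesQueue,), daemon=True).start()
frame = zedFramesQueue.get()  # blocks until the grabber produces a frame
print(frame.FPS, frame.camPosition)  # attribute names assumed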
Example #22
def main():
    # global stop_signal
    # signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        # coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # tracking enable
        py_transform = cams.transform_list[index]
        print("PositionalTrackingParameters start")
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        print("PositionalTrackingParameters end")
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        runtime = sl.RuntimeParameters()
        cams.runtime_list.append(runtime)
        index = index + 1

    #Start camera threads
    # for index in range(0, len(cams.zed_list)):
    #     if cams.zed_list[index].is_opened():
    #         thread_list.append(threading.Thread(target=grab_run, args=(cams,index,)))
    #         thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        fd_cam = f'{basePath}/{cams.name_list[index]}'
        os.makedirs(fd_cam, exist_ok=True)
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2
        # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr, distortion = get_camera_intrintic_info(cams.zed_list[index])
        filename = f'{fd_cam}/camera-intrinsics.csv'
        np.savetxt(filename, cam_intr)
        filename = f'{fd_cam}/camera-distortion.csv'
        np.savetxt(filename, distortion)

    #*******************************************************************
    take_by_keyinput(cameras, cams)
    # take_by_keyinput_camera_view(cameras, cams)
    #*******************************************************************
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #23
def main():

    #OPENCV setup
    lowerBound = np.array([33, 80, 40])
    upperBound = np.array([102, 255, 255])
    red = (255, 0, 0)
    distance = 1
    xcenter = 1
    center = 0

    kernelOpen = np.ones((5, 5))
    kernelClose = np.ones((20, 20))

    #ZED SETUP

    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.UNIT.MILLIMETER  # Use millimeter units (for depth measurements)

    #OPEN CAMERA

    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Create and set RuntimeParameters after opening the camera
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode

    # Capture 50 images and depth, then stop
    i = 0
    image = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()

    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
    tr_np = mirror_ref.m

    while i < 50:
        # A new image is available if grab() returns SUCCESS
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)

            # Convert the retrieved sl.Mat image to an OpenCV-compatible NumPy array
            image_ocv = image.get_data()

            # Retrieve depth map. Depth is aligned on the left image
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            point_cloud_np.dot(tr_np)

            if not np.isnan(distance) and not np.isinf(distance):
                distance = round(distance)
                print("Distance to Camera at ({0}, {1}): {2} mm\n".format(
                    x, y, distance))
                # Increment the loop
                i = i + 1
            else:
                print(
                    "Can't estimate distance at this position, move the camera\n"
                )
            sys.stdout.flush()

    # Close the camera
    zed.close()
Example #24
    def open_zed(self, event):
        self.zed = sl.Camera()

        # Create an InitParameters object and set configuration parameters
        init_params = sl.InitParameters()
        init_params.depth_mode = sl.DEPTH_MODE.QUALITY  # Use QUALITY depth mode
        init_params.coordinate_units = sl.UNIT.METER  # Use meter units (for depth measurements)
        init_params.camera_resolution = sl.RESOLUTION.VGA  # Set the image size here
        init_params.camera_fps = 100

        # Open the camera
        err = self.zed.open(init_params)
        if err != sl.ERROR_CODE.SUCCESS:
            exit(1)

        # Create and set RuntimeParameters after opening the camera
        runtime_parameters = sl.RuntimeParameters()
        runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode
        # Setting the depth confidence parameters
        runtime_parameters.confidence_threshold = 100
        runtime_parameters.textureness_confidence_threshold = 100

        # Grab images and depth continuously
        i = 0
        self.image = sl.Mat()
        # cv2.imwrite("i1.jpg", MatrixToImage(image))
        self.depth = sl.Mat()
        # cv2.imwrite("d1.jpg", MatrixToImage(depth))
        self.point_cloud = sl.Mat()

        mirror_ref = sl.Transform()
        mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
        tr_np = mirror_ref.m

        while True:
            # A new image is available if grab() returns SUCCESS
            if self.zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
                # Retrieve left image
                self.zed.retrieve_image(self.image, sl.VIEW.LEFT)
                # Retrieve depth map. Depth is aligned on the left image
                self.zed.retrieve_measure(self.depth, sl.MEASURE.DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                self.zed.retrieve_measure(self.point_cloud, sl.MEASURE.XYZRGBA)
                id = self.image.get_data()
                # dd = self.depth.get_data()
                im = img.fromarray(id)
                imi = im.convert('RGB')
                # im = img.fromarray(dd)
                # imd = im.convert('RGB')
                self.frame = imi
                size = imi.size
                image1 = cv2.cvtColor(np.asarray(imi), cv2.COLOR_BGR2RGB)
                pic = Bitmap.FromBuffer(size[0], size[1], image1)
                self.bmp.SetBitmap(pic)
                self.grid_bag_sizer.Fit(self)
                # im.save("test2.jpg")
                # Get and print distance value in mm at the center of the image
                # We measure the distance camera - object using Euclidean distance
                x = round(self.image.get_width() / 2)
                y = round(self.image.get_height() / 2)
                err, point_cloud_value = self.point_cloud.get_value(x, y)

                distance = math.sqrt(
                    point_cloud_value[0] * point_cloud_value[0] +
                    point_cloud_value[1] * point_cloud_value[1] +
                    point_cloud_value[2] * point_cloud_value[2])

                point_cloud_np = self.point_cloud.get_data()
                point_cloud_np.dot(tr_np)

                if not np.isnan(distance) and not np.isinf(distance):
                    print(
                        "Distance to Camera at ({}, {}) (image center): {:1.3} m"
                        .format(x, y, distance),
                        end="\r")
                    # Increment the loop
                    i = i + 1
                else:
                    print("Can't estimate distance at this position.")
                    print(
                        "Your camera is probably too close to the scene, please move it backwards.\n"
                    )
                sys.stdout.flush()

        # Close the camera
        self.zed.close()
Example #25
def main():
    # ZED's bundled sample program, carried over as-is
    print("Running...")
    #init = zcam.PyInitParameters()
    #zed = zcam.PyZEDCamera()
    '''new'''
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.UNIT.MILLIMETER  # Use millimeter units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.HD2K

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
    '''new'''
    '''
    # Retrieve depth, point cloud, and other data
    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    depth = core.PyMat()
    point_cloud = core.PyMat()
    '''
    '''new'''
    # Create and set RuntimeParameters after opening the camera
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.STANDARD  # Use STANDARD sensing mode
    # Setting the depth confidence parameters
    runtime_parameters.confidence_threshold = 100
    runtime_parameters.textureness_confidence_threshold = 100

    # Grab images and depth until 'q' is pressed
    i = 0
    mat = sl.Mat()  #image -> mat
    depth = sl.Mat()
    point_cloud = sl.Mat()

    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
    tr_np = mirror_ref.m
    '''new'''
    '''
    # ZED parameter setup
    init_params = zcam.PyInitParameters()
    init_params.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE  # Use PERFORMANCE depth mode
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_MILLIMETER  # Use millimeter units (for depth measurements)
    # Switch the camera mode; VGA resolution is lower but much faster
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_VGA
    init_params.camera_fps = 100
    '''

    key = ''
    while key != 113:  # for 'q' key
        #err = zed.grab(runtime)
        #if err == tp.PyERROR_CODE.PySUCCESS:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            zed.retrieve_image(mat, sl.VIEW.LEFT)
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

            frame = mat.get_data()
            t1 = cv2.getTickCount()
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # The ZED is a stereo camera; only the left image is used for detection here
            frame = cv2.resize(
                frame, (int(mat.get_width() / 2), int(mat.get_height() / 2)),
                interpolation=cv2.INTER_AREA)
            # SURF detect
            KP, Desc = surf.detectAndCompute(frame, None)
            matches = flann.knnMatch(Desc, trainDesc, k=2)

            goodMatch = []
            for m, n in matches:
                if (m.distance < 0.8 * n.distance):
                    goodMatch.append(m)
            if (len(goodMatch) > MIN_MATCH_COUNT):
                yp = []
                qp = []
                for m in goodMatch:
                    yp.append(trainKP[m.trainIdx].pt)
                    qp.append(KP[m.queryIdx].pt)
                yp, qp = np.float32((yp, qp))
                H, status = cv2.findHomography(yp, qp, cv2.RANSAC, 3.0)
                h, w = trainImg.shape
                trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1],
                                           [w - 1, 0]]])
                imgBorder = cv2.perspectiveTransform(trainBorder, H)
                cv2.polylines(frame, [np.int32(imgBorder)], True, (0, 255, 0),
                              3)  # Draw the detected outline on the displayed frame
                # Get the target bounding box corner coordinates and compute the center point
                c1 = imgBorder[0, 0]
                c2 = imgBorder[0, 1]
                c3 = imgBorder[0, 2]
                c4 = imgBorder[0, 3]
                xmin = min(c1[0], c2[0])
                xmax = max(c3[0], c4[0])
                ymin = min(c1[1], c4[1])
                ymax = max(c2[1], c3[1])

                # distance_point_cloud: use the bbox center
                x = round((xmin + xmax) / 2)
                y = round((ymin + ymax) / 2)

                if (x < 0 or y < 0):
                    x = 0
                    y = 0

                # Use the computed center point to fetch the point cloud value
                err, point_cloud_value = point_cloud.get_value(x, y)
                # Compute the distance to the camera from the point cloud (left camera as origin)
                distance = math.sqrt(
                    point_cloud_value[0] * point_cloud_value[0] +
                    point_cloud_value[1] * point_cloud_value[1] +
                    point_cloud_value[2] * point_cloud_value[2])
                # Overlay the distance on the frame

                if not np.isnan(distance) and not np.isinf(distance):
                    distance = round(distance)
                    print("Distance to Camera at ({0}, {1}): {2} mm\n".format(
                        x, y, distance))
                    Z = "distance:{} mm".format(distance)
                    cv2.putText(frame, Z, (int(xmax), int(ymax)), font, 0.7,
                                (255, 255, 255), 2, cv2.LINE_AA)

                else:
                    # Point-cloud value was NaN/inf: depth is invalid here
                    print(
                        "Can't estimate distance at this position, move the camera\n"
                    )
            else:
                print("Not Eough match")
            sys.stdout.flush()
            t2 = cv2.getTickCount()
            fps = cv2.getTickFrequency() / (t2 - t1)
            fps = "Camera FPS: {0}.".format(fps)
            cv2.putText(frame, fps, (25, 25), font, 0.5, (255, 255, 255), 2,
                        cv2.LINE_AA)
            cv2.imshow("ZED", frame)
            key = cv2.waitKey(5)
        else:
            key = cv2.waitKey(5)

    cv2.destroyAllWindows()

    zed.close()
    print("\nFINISH")
Example #26
0
def main():
    imu = PX4Data()
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init_params.camera_resolution = sl.RESOLUTION.RESOLUTION_VGA  # Use VGA video mode
    init_params.camera_fps = 120  # Set fps at 120
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Z_UP_X_FWD
    init_params.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)


    # Enable positional tracking with default parameters
    py_transform = sl.Transform()  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.TrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Grab frames until 'q' is pressed
    i = 0
    image = sl.Mat()
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD  # Use STANDARD sensing mode
    prevTimeStamp = 0
    file = open('data/data.txt', 'w')
    key = 0
    depth = sl.Mat()
    point_cloud = sl.Mat()
    pcList = []
    while key != 113:
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS: # A new image is available if grab() returns SUCCESS
            timestamp = zed.get_timestamp(sl.TIME_REFERENCE.TIME_REFERENCE_CURRENT)  # Get the timestamp at the time the image was captured
            dt = (timestamp - prevTimeStamp) * 1.0 / 10 ** 9
            if dt > 0.03:
                # Get the pose of the left eye of the camera with reference to the world frame
                zed.get_position(zed_pose, sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)

                # Display the translation and timestamp
                py_translation = sl.Translation()
                gnd_pos = zed_pose.get_translation(py_translation).get()
                tx = round(gnd_pos[0], 3)
                ty = round(gnd_pos[1], 3)
                tz = round(gnd_pos[2], 3)
                print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".format(tx, ty, tz, zed_pose.timestamp))

                # Display the orientation quaternion
                py_orientation = sl.Orientation()
                quat = zed_pose.get_orientation(py_orientation).get()
                ox = round(quat[0], 3)
                oy = round(quat[1], 3)
                oz = round(quat[2], 3)
                ow = round(quat[3], 3)
                print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(ox, oy, oz, ow))

                zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
                img = image.get_data()
                cv2.imwrite('data/images/' + str(timestamp) + '.png', img)

                zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
                print(point_cloud.get_data().shape)
                pc = np.reshape(point_cloud.get_data(), (1, 376, 672, 4))
                pcList.append(pc)

                cv2.imshow("ZED", img)
                key = cv2.waitKey(1)

                prevTimeStamp = timestamp
                print(dt)
                print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image.get_width(), image.get_height(), timestamp))

                file.write('%d '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f '
                           '%.4f %.4f %.4f %.4f \n' % (timestamp, tx, ty, tz, ox, oy, oz, ow,
                                                       imu.acc.x, imu.acc.y, imu.acc.z,
                                                       imu.gyr.x, imu.gyr.y, imu.gyr.z,
                                                       imu.gps.x, imu.gps.y, imu.gps.z,
                                                       imu.vel.x, imu.vel.y, imu.vel.z,
                                                       imu.quat.x, imu.quat.y, imu.quat.z, imu.quat.w))
                i = i + 1

    # Save the accumulated point clouds, then close everything
    pc = np.concatenate(pcList, axis=0)
    np.save('pc', pc)
    zed.close()
    file.close()
    imu.close()
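The dt guard in the loop above throttles pose/IMU logging to roughly 33 Hz using the SDK's nanosecond timestamps; the same idea as a small, hypothetical helper (class name and API are ours):

class RateLimiter:
    # Admits events at most once every min_dt seconds, given nanosecond
    # timestamps such as those returned by zed.get_timestamp()
    def __init__(self, min_dt):
        self.min_dt = min_dt
        self.prev_ns = 0

    def ready(self, timestamp_ns):
        dt = (timestamp_ns - self.prev_ns) / 1e9
        if dt > self.min_dt:
            self.prev_ns = timestamp_ns
            return True
        return False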
Example #27
0
def main():

    if len(sys.argv) != 2:
        print(
            "Only the path of the output SVO file should be passed as argument."
        )
        exit(1)

    cam = sl.Camera()
    init = sl.InitParameters()

    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD720  # Use HD720 video mode (default)
    init.coordinate_system = sl.COORDINATE_SYSTEM.COORDINATE_SYSTEM_RIGHT_HANDED_Y_UP  # Use a right-handed Y-up coordinate system
    init.coordinate_units = sl.UNIT.UNIT_METER  # Set units in meters

    # new for SVO
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_NONE
    ##

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.TrackingParameters(transform)

    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    # from Positional Tracking:
    # Track the camera position until KeyboardInterrupt (ctrl-C)
    zed_pose = sl.Pose()
    zed_imu = sl.IMUData()
    runtime_parameters = sl.RuntimeParameters()

    path = '/media/nvidia/SD1/translation.csv'
    position_file = open(path, 'w')
    #END from positional tracking

    pymesh = sl.Mesh()
    print("Processing...")

    #new for SVO
    path_output = sys.argv[1]
    err = cam.enable_recording(
        path_output, sl.SVO_COMPRESSION_MODE.SVO_COMPRESSION_MODE_AVCHD)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        exit(1)
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0
    ##

    while True:
        try:
            cam.grab(runtime)

            # new for SVO
            state = cam.record()
            if state["status"]:
                frames_recorded += 1
            print("Frame count: " + str(frames_recorded), end="\r")
            ##

            cam.request_mesh_async()
            # Get the pose of the left eye of the camera with reference to the world frame
            cam.get_position(zed_pose,
                             sl.REFERENCE_FRAME.REFERENCE_FRAME_WORLD)
            cam.get_imu_data(zed_imu, sl.TIME_REFERENCE.TIME_REFERENCE_IMAGE)

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            position_file.write("{0},{1},{2},{3}\n".format(
                tx, ty, tz, zed_pose.timestamp))

        except KeyboardInterrupt:
            cam.extract_whole_mesh(pymesh)
            cam.disable_tracking()
            cam.disable_spatial_mapping()
            # new for .svo
            cam.disable_recording()
            ##
            filter_params = sl.MeshFilterParameters()
            filter_params.set(sl.MESH_FILTER.MESH_FILTER_HIGH)
            print("Filtering params : {0}.".format(
                pymesh.filter(filter_params)))

            apply_texture = pymesh.apply_texture(
                sl.MESH_TEXTURE_FORMAT.MESH_TEXTURE_RGBA)
            print("Applying texture : {0}.".format(apply_texture))
            print_mesh_information(pymesh, apply_texture)

            save_filter(filter_params)
            save_mesh(pymesh)
            cam.close()
            position_file.close()
            save_position(path)
            print("\nFINISH")
            raise
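This sample uses the pre-3.0 recording API (enable_recording(path, mode) plus an explicit record() call per grab). Under SDK 3.x the same loop is shorter; a minimal sketch, assuming pyzed 3.x where every successful grab() is appended to the SVO automatically:

import pyzed.sl as sl

def record_svo(path_output, n_frames=100):
    cam = sl.Camera()
    if cam.open(sl.InitParameters()) != sl.ERROR_CODE.SUCCESS:
        exit(1)
    # Recording is configured through RecordingParameters; no record() call
    rec = sl.RecordingParameters(path_output, sl.SVO_COMPRESSION_MODE.H264)
    if cam.enable_recording(rec) != sl.ERROR_CODE.SUCCESS:
        exit(1)
    runtime = sl.RuntimeParameters()
    recorded = 0
    while recorded < n_frames:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            recorded += 1
    cam.disable_recording()
    cam.close()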
Example #28
0
def main():
    global stop_signal
    thread_list = []
    signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
        #tracing enable
        py_transform = cams.transform_list[index]
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        index = index + 1

    #Start camera threads
    for index in range(0, len(cams.zed_list)):
        if cams.zed_list[index].is_opened():
            thread_list.append(
                threading.Thread(target=grab_run, args=(
                    cams,
                    index,
                )))
            thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2
        # Declare sl.Mat matrices at the reduced size
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        ########
        cam_intr = get_camera_intrintic_info(cams.zed_list[index])
        filename = path + cams.name_list[index] + "-camera-intrinsics.txt"
        df = pd.DataFrame(cam_intr)
        df.to_csv(filename, sep=' ', header=None, index=None)

    #*******************************************************************

    index = 0
    take_cam_id = 0
    for cam in cameras:
        get_cam_data(cams, index, take_cam_id)
        index += 1

    stop_signal = True
    #*******************************************************************
    '''
    key = ' '
    take_cam_id=0
    while key != 113 :

        index=0
        image_ocv_cat=None
        depth_image_ocv_cat=None
        for cam in cameras:
            image_ocv, depth_image_ocv=get_cam_color_depth(cams,index)
            if image_ocv_cat is None:
                image_ocv_cat=image_ocv
                depth_image_ocv_cat=depth_image_ocv
            else:
                image_ocv_cat=np.hstack([image_ocv_cat,image_ocv])
                depth_image_ocv_cat=np.hstack([depth_image_ocv_cat,depth_image_ocv])
            index+=1

        cv2.imshow("Image", image_ocv_cat)
        cv2.imshow("Depth", depth_image_ocv_cat)

        key = cv2.waitKey(10)
        if key == 114 or  key == 82:#R
            index = 0
            for cam in cameras:
                # process_key_event(cams,key,index)
                get_cam_data(cams, index,take_cam_id)
                index+=1
            take_cam_id=take_cam_id+1

    cv2.destroyAllWindows()
    '''
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #29
0
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.ULTRA  # Use ULTRA depth mode
    init_params.coordinate_units = sl.UNIT.METER  # Use meter units (for depth measurements)
    init_params.camera_resolution = sl.RESOLUTION.HD720

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Create and set RuntimeParameters after opening the camera
    runtime_parameters = sl.RuntimeParameters()
    runtime_parameters.sensing_mode = sl.SENSING_MODE.FILL  # Use FILL sensing mode
    # Setting the depth confidence parameters
    runtime_parameters.confidence_threshold = 50
    runtime_parameters.textureness_confidence_threshold = 100

    # Grab images and depth until Esc is pressed
    i = 0
    image = sl.Mat()
    depth = sl.Mat()
    point_cloud = sl.Mat()

    mirror_ref = sl.Transform()
    mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
    tr_np = mirror_ref.m

    while True:
        # A new image is available if grab() returns SUCCESS
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Retrieve depth map. Depth is aligned on the left image
            zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
            # Retrieve colored point cloud. Point cloud is aligned on the left image.
            zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)

            # Get and print distance value in mm at the center of the image
            # We measure the distance camera - object using Euclidean distance
            x = round(image.get_width() / 2)
            y = round(image.get_height() / 2)
            err, point_cloud_value = point_cloud.get_value(x, y)

            distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

            point_cloud_np = point_cloud.get_data()
            point_cloud_np.dot(tr_np)  # note: the transformed copy is discarded here

            # Use get_data() to get the numpy array
            image_ocv = image.get_data()
            # image_ocv = cv.cvtColor(image_ocv, cv.COLOR_BGR2GRAY)
            # image_ocv = cv.bitwise_not(image_ocv)

            image_ocv = cv.circle(image_ocv, (x, y),
                                  radius=4,
                                  color=(0, 255, 0),
                                  thickness=2)

            if not np.isnan(distance) and not np.isinf(distance):
                text = "Distance to Camera at ({}, {}) (image center): {:1.3} m".format(
                    x, y, distance)
                print(text, end="\r")
                cv.putText(image_ocv, text, (10, 30), cv.FONT_HERSHEY_SIMPLEX,
                           0.5, (0, 255, 0), 1)

            else:
                print("Can't estimate distance at this position.")
                print(
                    "Your camera is probably too close to the scene, please move it backwards.\n"
                )

            # Display the left image from the numpy array
            cv.imshow("Image", image_ocv)
            key = cv.waitKey(1)
            if key == 27:  # esc
                break

            sys.stdout.flush()

    # Close the camera
    zed.close()
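The .dot(tr_np) call above computes a transformed copy of the point cloud and discards it; applying a 4x4 homogeneous transform to the XYZRGBA array correctly requires leaving the packed RGBA channel untouched. A sketch (the function name is ours):

import numpy as np

def transform_point_cloud(point_cloud_np, tr_np):
    # Apply a 4x4 transform to an (H, W, 4) XYZRGBA array; the 4th channel
    # packs RGBA and is preserved, only XYZ is rotated and translated
    out = point_cloud_np.copy()
    out[..., :3] = point_cloud_np[..., :3] @ tr_np[:3, :3].T + tr_np[:3, 3]
    return out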
Example #30
0
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    tracking_params = sl.PositionalTrackingParameters()
    zed.enable_positional_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    camera_info = zed.get_camera_information()
    # Create OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(len(sys.argv), sys.argv, camera_info.camera_model)

    py_translation = sl.Translation()
    pose_data = sl.Transform()

    text_translation = ""
    text_rotation = ""

    while viewer.is_available():
        if zed.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            tracking_state = zed.get_position(camera_pose)
            if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                rotation = camera_pose.get_rotation_vector()
                translation = camera_pose.get_translation(py_translation)
                text_rotation = str(
                    (round(rotation[0], 2), round(rotation[1],
                                                  2), round(rotation[2], 2)))
                text_translation = str(