# Common import assumed by every snippet in this collection:
import pyzed.sl as sl
# This first sample also relies on OpenGL's GLUT loop and a viewer module
# shipped with the original example (exact module paths not shown here):
# from OpenGL.GLUT import glutMainLoop
# import tracking_viewer as tv


def main():

    init = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720,
                             depth_mode=sl.DEPTH_MODE.PERFORMANCE,
                             coordinate_units=sl.UNIT.METER,
                             coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP,
                             sdk_verbose=True)
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    transform = sl.Transform()
    tracking_params = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking_params)

    runtime = sl.RuntimeParameters()
    camera_pose = sl.Pose()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = sl.Translation()

    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
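Note: start_zed and tv.PyTrackingViewer are defined elsewhere in the original
sample and are not part of the SDK. A minimal sketch of the grab loop such a
helper typically runs (printing instead of the real viewer update, whose API
is not shown here):

import pyzed.sl as sl

def start_zed(cam, runtime, camera_pose, viewer, py_translation, n_frames=1000):
    # Grab frames and print the world-frame translation; a real implementation
    # would push the pose to the viewer instead of printing it.
    for _ in range(n_frames):
        if cam.grab(runtime) != sl.ERROR_CODE.SUCCESS:
            break
        state = cam.get_position(camera_pose, sl.REFERENCE_FRAME.WORLD)
        if state == sl.POSITIONAL_TRACKING_STATE.OK:
            tx, ty, tz = camera_pose.get_translation(py_translation).get()[:3]
            print("Translation: {:.3f}, {:.3f}, {:.3f}".format(tx, ty, tz))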
Example #2
def init_params(menu, cam_id=0):
    # menu, mode, verbose and save_dir_fmt are module-level configuration
    # objects defined elsewhere in the original script.
    global save_dir
    zed = EasyDict({})
    zed.cam = sl.Camera()
    zed.mat = EasyDict({
        'pose': sl.Pose(),
        'translation': sl.Translation(),
        'transform': sl.Transform(),
        'image': sl.Mat(),  # image_map
        'depth': sl.Mat(),  # depth_map
        'point_cloud': sl.Mat(),
        'sensors': sl.SensorsData()  # sensors_data
    })

    zed.param = EasyDict({
        'init':
        sl.InitParameters(
            camera_resolution=mode.resolution[menu.cam.resolution],
            depth_mode=mode.depth[menu.mode.depth],
            coordinate_units=mode.unit[menu.unit],
            coordinate_system=mode.coordinate_system[menu.coordinate_system],
            depth_minimum_distance=menu.depth_range.min,
            depth_maximum_distance=menu.depth_range.max,
            sdk_verbose=verbose),
        'runtime':
        sl.RuntimeParameters(sensing_mode=mode.sensing[menu.mode.sensing]),
        'tracking':
        sl.PositionalTrackingParameters(zed.mat.transform)
    })

    # Bind the InitParameters to the requested camera ID.
    zed.param.init.set_from_camera_id(cam_id)
    save_dir = save_dir_fmt.format(cam_id)
    return zed
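The menu argument mirrors the attribute accesses above, and mode is a
module-level table mapping those menu values onto pyzed enums. A hypothetical
configuration that would satisfy this function (names are illustrative, not
from the original script):

from easydict import EasyDict

menu = EasyDict({
    'cam': {'resolution': 'HD720'},
    'mode': {'depth': 'PERFORMANCE', 'sensing': 'STANDARD'},
    'unit': 'METER',
    'coordinate_system': 'RIGHT_HANDED_Y_UP',
    'depth_range': {'min': 0.3, 'max': 10.0},
})
zed = init_params(menu, cam_id=0)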
Example #3
def main():
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP

    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    spatial = sl.SpatialMappingParameters()

    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking)

    pymesh = sl.Mesh()
    pyplane = sl.Plane()
    reset_tracking_floor_frame = sl.Transform()
    found = 0
    print("Processing...")
    i = 0
    while i < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            err = cam.find_floor_plane(pyplane, reset_tracking_floor_frame)
            if i > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                break
            i += 1
        else:
            # Stop on grab failure (e.g. end of the SVO file) instead of spinning forever.
            break

    if found == 0:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)

    cam.disable_positional_tracking()
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Example #4
def main():

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    cam = sl.Camera()
    input_type = sl.InputType()
    input_type.set_from_svo_file(filepath)
    init = sl.InitParameters()
    init.input = input_type
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    spatial = sl.SpatialMappingParameters()
    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)

    cam.enable_positional_tracking(tracking)
    cam.enable_spatial_mapping(spatial)

    pymesh = sl.Mesh()
    print("Processing...")
    for i in range(200):
        cam.grab(runtime)
        cam.request_spatial_map_async()

    cam.extract_whole_spatial_map(pymesh)
    cam.disable_positional_tracking()
    cam.disable_spatial_mapping()

    filter_params = sl.MeshFilterParameters()
    filter_params.set(sl.MESH_FILTER.HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(sl.MESH_TEXTURE_FORMAT.RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)

    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Example #5
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a Transform object for TrackingParameters object
    py_transform = sl.Transform()
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()

    zed_sensors = sl.SensorsData()
    runtime_parameters = sl.RuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
            zed_imu = zed_sensors.get_imu_data()

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
                ax, ay, az))

            #Display the IMU angular velocity
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            zed_imu_pose = sl.Transform()
            ox = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], 3)
            oy = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], 3)
            oz = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], 3)
            ow = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], 3)
            print(
                "IMU Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                    ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
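These samples are normally launched through the standard entry-point guard,
which several snippets in this collection omit:

if __name__ == "__main__":
    main()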
Example #6

def main():
    # global stop_signal
    # signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        # coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # enable tracking
        py_transform = cams.transform_list[index]
        print("PositionalTrackingParameters start")
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        print("PositionalTrackingParameters end")
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        runtime = sl.RuntimeParameters()
        cams.runtime_list.append(runtime)
        index = index + 1

    #Start camera threads
    # for index in range(0, len(cams.zed_list)):
    #     if cams.zed_list[index].is_opened():
    #         thread_list.append(threading.Thread(target=grab_run, args=(cams,index,)))
    #         thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        fd_cam = f'{basePath}/{cams.name_list[index]}'
        os.makedirs(fd_cam, exist_ok=True)
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        # Halve with integer division: sl.Mat dimensions must be ints.
        image_size.width = image_size.width // 2
        image_size.height = image_size.height // 2
        # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        # Save the per-camera intrinsics and distortion to CSV.
        cam_intr, distortion = get_camera_intrintic_info(cams.zed_list[index])
        filename = f'{fd_cam}/camera-intrinsics.csv'
        np.savetxt(filename, cam_intr)
        filename = f'{fd_cam}/camera-distortion.csv'
        np.savetxt(filename, distortion)

    #*******************************************************************
    take_by_keyinput(cameras, cams)
    # take_by_keyinput_camera_view(cameras, cams)
    #*******************************************************************
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #7
def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    obj_param = sl.ObjectDetectionParameters()
    # Different models can be chosen, optimizing for runtime or accuracy
    obj_param.detection_model = sl.DETECTION_MODEL.HUMAN_BODY_FAST
    obj_param.enable_tracking = True
    obj_param.image_sync = True
    obj_param.enable_mask_output = False
    # Optimizes the person's joint positions; requires more computation
    obj_param.enable_body_fitting = True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking:
        positional_tracking_param = sl.PositionalTrackingParameters()
        # positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    # For outdoor scenes or long range, the confidence should be lowered to avoid missing detections (~20-30)
    # For indoor scenes or closer range, a higher confidence limits the risk of false positives and increases the precision (~50+)
    obj_runtime_param.detection_confidence_threshold = 40

    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        if objects.is_new:
            obj_array = objects.object_list
            print(str(len(obj_array)) + " Person(s) detected\n")
            if len(obj_array) > 0:
                first_object = obj_array[0]
                print("First Person attributes:")
                print(" Confidence (" + str(int(first_object.confidence)) + "/100)")
                if obj_param.enable_tracking:
                    print(" Tracking ID: " + str(int(first_object.id)) + " tracking state: " + repr(
                        first_object.tracking_state) + " / " + repr(first_object.action_state))
                position = first_object.position
                velocity = first_object.velocity
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n Velocity: [{3},{4},{5}]\n 3D dimentions: [{6},{7},{8}]".format(
                    position[0], position[1], position[2], velocity[0], velocity[1], velocity[2], dimensions[0],
                    dimensions[1], dimensions[2]))
                if first_object.mask.is_init():
                    print(" 2D mask available")

                print(" Keypoint 2D ")
                keypoint_2d = first_object.keypoint_2d
                for it in keypoint_2d:
                    print("    " + str(it))
                print("\n Keypoint 3D ")
                keypoint = first_object.keypoint
                for it in keypoint:
                    print("    " + str(it))

                input('\nPress enter to continue: ')

    # Close the camera
    zed.close()
Example #8

def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking=True
    obj_param.image_sync=True
    obj_param.enable_mask_output=True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking :
        positional_tracking_param = sl.PositionalTrackingParameters()
        #positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40

    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        if objects.is_new :
            obj_array = objects.object_list
            print(str(len(obj_array))+" Object(s) detected\n")
            if len(obj_array) > 0 :
                first_object = obj_array[0]
                print("First object attributes:")
                print(" Label '"+repr(first_object.label)+"' (conf. "+str(int(first_object.confidence))+"/100)")
                if obj_param.enable_tracking :
                    print(" Tracking ID: "+str(int(first_object.id))+" tracking state: "+repr(first_object.tracking_state)+" / "+repr(first_object.action_state))
                position = first_object.position
                velocity = first_object.velocity
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n Velocity: [{3},{4},{5}]\n 3D dimentions: [{6},{7},{8}]".format(position[0],position[1],position[2],velocity[0],velocity[1],velocity[2],dimensions[0],dimensions[1],dimensions[2]))
                if first_object.mask.is_init():
                    print(" 2D mask available")

                print(" Bounding Box 2D ")
                bounding_box_2d = first_object.bounding_box_2d
                for it in bounding_box_2d :
                    print("    "+str(it),end='')
                print("\n Bounding Box 3D ")
                bounding_box = first_object.bounding_box
                for it in bounding_box :
                    print("    "+str(it),end='')

                input('\nPress enter to continue: ')


    # Close the camera
    zed.close()
Example #9

def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
        
    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking=True
    obj_param.image_sync=True
    obj_param.enable_mask_output=True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking :
        positional_tracking_param = sl.PositionalTrackingParameters()
        #positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40
    
    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        start = timeit.default_timer()
        if objects.is_new :
            obj_array = objects.object_list
            if len(obj_array) > 0 :
                first_object = obj_array[0]
                print("First object attributes:")
                print(" Label '"+repr(first_object.label)+"' (conf. "+str(int(first_object.confidence))+"/100)")
                position = first_object.position
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n 3D dimentions: [{3},{4},{5}]".format(position[0],position[1],position[2],dimensions[0],dimensions[1],dimensions[2]))

                ######################
                image = sl.Mat()
                depth = sl.Mat()
                point_cloud = sl.Mat()
                mirror_ref = sl.Transform()
                mirror_ref.set_translation(sl.Translation(2.75,4.0,0))
                tr_np = mirror_ref.m
                zed.retrieve_image(image, sl.VIEW.LEFT)
                # Retrieve depth map. Depth is aligned on the left image
                zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
                x = round(image.get_width() / 2)
                y = round(image.get_height() / 2)
                err, point_cloud_value = point_cloud.get_value(x, y)

                distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                 point_cloud_value[1] * point_cloud_value[1] +
                                 point_cloud_value[2] * point_cloud_value[2])

                point_cloud_np = point_cloud.get_data()
                point_cloud_np.dot(tr_np)
                if not np.isnan(distance) and not np.isinf(distance):
                    print("Distance to Camera at ({}, {}) (image center): {:1.3} m".format(x, y, distance), end="\r")
                else:
                    # Depth at the image center is not valid here (occlusion or out of range).
                    pass
                
                print("\n Bounding Box 3D ")
                bounding_box = first_object.bounding_box
                stop = timeit.default_timer()
                print("\n FPS:", stop - start)


    # Close the camera
    zed.close()
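Note that the distance computed above is sampled at the image center, not at
the detected object. The object's 3D position is already available from the
detector, so its range can be read directly, as in this sketch:

import numpy as np

def object_distance(obj):
    # Euclidean norm of the object's 3D position (meters, given UNIT.METER).
    return float(np.linalg.norm(np.asarray(obj.position)))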
Example #10

def main():
    global stop_signal
    thread_list = []
    signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []
    # Set configuration parameters

    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)  # do not continue with a camera that failed to open
        # enable tracking
        py_transform = cams.transform_list[index]
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        index = index + 1

    #Start camera threads
    for index in range(0, len(cams.zed_list)):
        if cams.zed_list[index].is_opened():
            thread_list.append(
                threading.Thread(target=grab_run, args=(
                    cams,
                    index,
                )))
            thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        # Halve with integer division: sl.Mat dimensions must be ints.
        image_size.width = image_size.width // 2
        image_size.height = image_size.height // 2
        # Declare your sl.Mat matrices
        #image_zed = cams.left_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        #depth_image_zed = cams.depth_list[index](image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)
        # Save the per-camera intrinsics to a text file.
        cam_intr = get_camera_intrintic_info(cams.zed_list[index])
        filename = path + cams.name_list[index] + "-camera-intrinsics.txt"
        df = pd.DataFrame(cam_intr)
        df.to_csv(filename, sep=' ', header=None, index=None)

    #*******************************************************************

    index = 0
    take_cam_id = 0
    for cam in cameras:
        get_cam_data(cams, index, take_cam_id)
        index += 1

    stop_signal = True
    #*******************************************************************
    '''
    key = ' '
    take_cam_id=0
    while key != 113 :

        index=0
        image_ocv_cat=None
        depth_image_ocv_cat=None
        for cam in cameras:
            image_ocv, depth_image_ocv=get_cam_color_depth(cams,index)
            if image_ocv_cat is None:
                image_ocv_cat=image_ocv
                depth_image_ocv_cat=depth_image_ocv
            else:
                image_ocv_cat=np.hstack([image_ocv_cat,image_ocv])
                depth_image_ocv_cat=np.hstack([depth_image_ocv_cat,depth_image_ocv])
            index+=1

        cv2.imshow("Image", image_ocv_cat)
        cv2.imshow("Depth", depth_image_ocv_cat)

        key = cv2.waitKey(10)
        if key == 114 or  key == 82:#R
            index = 0
            for cam in cameras:
                # process_key_event(cams,key,index)
                get_cam_data(cams, index,take_cam_id)
                index+=1
            take_cam_id=take_cam_id+1

    cv2.destroyAllWindows()
    '''
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
Example #11
    def process_svo_map_and_pose(self,
                                 map_file="map.obj",
                                 poses_file="poses.txt",
                                 n_frames_to_skip=1,
                                 n_frames_to_trim=0):
        """
        Process the ZED SVO file and write to file:
            - A fused point cloud map
            - The camera pose history

        This function processes the SVO file with the maximum possible depth sensing quality,
        to maximize mapping and pose estimation quality.

        Parameters
        ----------
        map_file: str
            Name of the map file to output. Should be .obj file format.
        poses_file: str
            Name of the poses file to output. Should be .txt format.
        n_frames_to_skip: int, default 1
            If set to 2 or higher, the pose history output will skip frames. Use this to subsample the SVO output.
            For example, n_frames_to_skip=2 will include every other frame in the pose file.
            Does not affect the map creation.
        n_frames_to_trim: int, default 0
            Number of frames to drop from the end of the pose history.

        Returns
        -------
        None

        """
        # Initialize the ZED Camera object
        init_params = self.default_init_params()
        init_params.depth_mode = self.depth_quality_map
        zed = sl.Camera()
        err = zed.open(init_params)

        if err == sl.ERROR_CODE.INVALID_SVO_FILE:
            print("Error processing SVO file: '%s'" % self.svo_path)
            return

        # Initialize positional tracking
        tracking_parameters = sl.PositionalTrackingParameters()
        zed.enable_positional_tracking(tracking_parameters)

        # Initialize spatial mapping
        mapping_parameters = sl.SpatialMappingParameters()
        mapping_parameters.map_type = sl.SPATIAL_MAP_TYPE.FUSED_POINT_CLOUD
        # mapping_parameters.resolution_meter = mapping_parameters.get_resolution_preset(sl.MAPPING_RESOLUTION.MEDIUM)
        mapping_parameters.resolution_meter = mapping_parameters.get_resolution_preset(
            sl.MAPPING_RESOLUTION.HIGH)

        # Map at short range (3.5m) to maximize quality
        # This should reduce errors like points in the sky
        # mapping_parameters.range_meter = mapping_parameters.get_range_preset(sl.MAPPING_RANGE.SHORT)
        # mapping_parameters.range_meter = mapping_parameters.get_range_preset(sl.MAPPING_RANGE.LONG)
        mapping_parameters.range_meter = mapping_parameters.get_range_preset(
            sl.MAPPING_RANGE.MEDIUM)
        zed.enable_spatial_mapping(mapping_parameters)

        pose_history = []

        n_frames = zed.get_svo_number_of_frames()

        assert type(n_frames_to_skip) == int and (1 <= n_frames_to_skip < n_frames), \
            "n_frames_to_skip parameter must be an int between 1 and number of frames in the SVO."

        svo_position = 0  # Keep a separate counter instead of using zed.get_svo_position() - svo position skips sometimes
        next_frame = 0  # keep track of next frame to process, for subsampling frames

        last_frame = n_frames - n_frames_to_trim

        # SVO processing loop ("done" instead of shadowing the exit builtin)
        done = False
        while not done:
            # With spatial mapping enabled, zed.grab() updates the map in the background
            err = zed.grab()
            if err == sl.ERROR_CODE.SUCCESS:

                if svo_position < last_frame and svo_position >= next_frame:
                    print("\r[Processing frame %d of %d]" %
                          (svo_position, n_frames),
                          end='')
                    pose_history.append(get_zed_pose(zed))
                    next_frame += n_frames_to_skip
                svo_position += 1

            elif err == sl.ERROR_CODE.END_OF_SVOFILE_REACHED:
                print("Mapping module: Reached end of SVO file")
                done = True

        # Write outputs
        self.write_poses(pose_history, self.output_path / poses_file)
        self.write_map(zed, self.output_path / map_file)
        zed.close()
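get_zed_pose and the write_poses/write_map methods live elsewhere in this
class's module. A sketch of the pose query, assuming one flat record per
frame (timestamp, translation, orientation quaternion):

import pyzed.sl as sl

def get_zed_pose(zed):
    # Query the world-frame camera pose and flatten it into one record.
    pose = sl.Pose()
    zed.get_position(pose, sl.REFERENCE_FRAME.WORLD)
    t = pose.get_translation(sl.Translation()).get()
    q = pose.get_orientation(sl.Orientation()).get()
    return [pose.timestamp.get_milliseconds(),
            t[0], t[1], t[2], q[0], q[1], q[2], q[3]]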
Example #12
def main():
    # Create a ZEDCamera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters.
    # Positional tracking needs to be enabled before using spatial mapping
    py_transform = sl.Transform()
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable spatial mapping
    mapping_parameters = sl.SpatialMappingParameters(
        map_type=sl.SPATIAL_MAP_TYPE.FUSED_POINT_CLOUD)
    err = zed.enable_spatial_mapping(mapping_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Grab data during 3000 frames
    i = 0
    py_fpc = sl.FusedPointCloud()  # Create a FusedPointCloud object
    runtime_parameters = sl.RuntimeParameters()

    while i < 3000:
        # For each new grab, mesh data is updated
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # In the background, spatial mapping will use newly retrieved images, depth and pose to update the mesh
            mapping_state = zed.get_spatial_mapping_state()

            print("\rImages captured: {0} / 3000 || {1}".format(
                i, mapping_state))

            i = i + 1

    print("\n")

    # Extract, filter and save the mesh in an obj file
    print("Extracting Point Cloud...\n")
    err = zed.extract_whole_spatial_map(py_fpc)
    print(repr(err))
    #print("Filtering Mesh...\n")
    #py_mesh.filter(sl.MeshFilterParameters())  # Filter the mesh (remove unnecessary vertices and faces)
    print("Saving Point Cloud...\n")
    py_fpc.save("fpc.obj")

    # Disable tracking and mapping and close the camera
    zed.disable_spatial_mapping()
    zed.disable_positional_tracking()
    zed.close()
Example #13

def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="configs/caffe2/e2e_mask_rcnn_X_101_32x8d_FPN_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.6,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=256,
        help="Smallest size of the image to feed to the model. "
        "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    parser.add_argument("--svo-filename",
                        help="Optional SVO input filepath",
                        default=None)

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )

    init_cap_params = sl.InitParameters()
    if args.svo_filename:
        print("Loading SVO file " + args.svo_filename)
        init_cap_params.set_from_svo_file(args.svo_filename)
        init_cap_params.svo_real_time_mode = True
    init_cap_params.camera_resolution = sl.RESOLUTION.HD720
    init_cap_params.depth_mode = sl.DEPTH_MODE.ULTRA
    init_cap_params.coordinate_units = sl.UNIT.METER
    init_cap_params.depth_stabilization = True
    init_cap_params.camera_image_flip = sl.FLIP_MODE.AUTO
    init_cap_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP

    cap = sl.Camera()
    if not cap.is_opened():
        print("Opening ZED Camera...")
    status = cap.open(init_cap_params)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    display = True
    runtime = sl.RuntimeParameters()
    left = sl.Mat()
    ptcloud = sl.Mat()
    depth_img = sl.Mat()
    depth = sl.Mat()

    res = sl.Resolution(1280, 720)

    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    tracking_parameters.set_as_static = True
    err = cap.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    running = True
    keep_people_only = True

    if coco_demo.cfg.MODEL.MASK_ON:
        print("Mask enabled!")
    if coco_demo.cfg.MODEL.KEYPOINT_ON:
        print("Keypoints enabled!")

    while running:
        start_time = time.time()
        err_code = cap.grab(runtime)
        if err_code != sl.ERROR_CODE.SUCCESS:
            break

        cap.retrieve_image(left, sl.VIEW.LEFT, resolution=res)
        cap.retrieve_image(depth_img, sl.VIEW.DEPTH, resolution=res)
        cap.retrieve_measure(depth, sl.MEASURE.DEPTH, resolution=res)
        cap.retrieve_measure(ptcloud, sl.MEASURE.XYZ, resolution=res)
        ptcloud_np = np.array(ptcloud.get_data())

        img = cv2.cvtColor(left.get_data(), cv2.COLOR_RGBA2RGB)
        prediction = coco_demo.select_top_predictions(
            coco_demo.compute_prediction(img))

        # Keep people only
        if keep_people_only:
            labels_tmp = prediction.get_field("labels")
            people_coco_label = 1
            keep = torch.nonzero(labels_tmp == people_coco_label).squeeze(1)
            prediction = prediction[keep]

        composite = img.copy()
        humans_3d = None
        masks_3d = None
        if coco_demo.show_mask_heatmaps:
            composite = coco_demo.create_mask_montage(composite, prediction)
        composite = coco_demo.overlay_boxes(composite, prediction)
        if coco_demo.cfg.MODEL.MASK_ON:
            masks_3d = get_masks3d(prediction, depth)
            composite = coco_demo.overlay_mask(composite, prediction)
        if coco_demo.cfg.MODEL.KEYPOINT_ON:
            # Extract 3D skeleton from the ZED depth
            humans_3d = get_humans3d(prediction, ptcloud_np)
            composite = coco_demo.overlay_keypoints(composite, prediction)
        if True:
            overlay_distances(prediction, get_boxes3d(prediction, ptcloud_np),
                              composite, humans_3d, masks_3d)
            composite = coco_demo.overlay_class_names(composite, prediction)

        print(" Time: {:.2f} s".format(time.time() - start_time))

        if display:
            cv2.imshow("COCO detections", composite)
            cv2.imshow("ZED Depth", depth_img.get_data())
            key = cv2.waitKey(10)
            if key == 27:
                break  # esc to quit

    cap.close()
Example #14
def zedGrabber(zedFramesQueue):

    svo_path = "HD1080.svo"
    zed_id = 0
    init_mode = 0

    input_type = sl.InputType()
    if init_mode == 0:
        input_type.set_from_svo_file(svo_path)
    else:
        input_type.set_from_camera_id(zed_id)

    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    init.camera_fps = 30
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Z_UP

    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        log.error(repr(status))
        exit()

    # Enable positional tracking with default parameters
    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = cam.enable_positional_tracking(tracking_parameters)

    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    zed_pose = sl.Pose()
    runtime = sl.RuntimeParameters()

    # Use STANDARD sensing mode
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    mat = sl.Mat()
    point_cloud_mat = sl.Mat()

    # Import the global variables. This lets us instance Darknet once,
    # then run detection repeatedly in the loop below without re-instancing.

    while True:

        start_time = time.time()  # start time of the loop
        err = cam.grab(runtime)

        if err != sl.ERROR_CODE.SUCCESS:
            break

        cam.retrieve_image(mat, sl.VIEW.LEFT)
        image = mat.get_data()

        cam.retrieve_measure(point_cloud_mat, sl.MEASURE.XYZRGBA)
        depth = point_cloud_mat.get_data()
        cam.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)

        py_translation = sl.Translation()
        tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
        ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
        tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
        camPosition = (tx, ty, tz)
        #print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}".format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))
        camOrientation = zed_pose.get_rotation_vector() * 180 / math.pi
        image = rotate(image, -camOrientation[1], center=None, scale=1.0)
        FPS = 1.0 / (time.time() - start_time)
        # Do the detection
        #rawDetections = detect(netMain, metaMain, image, thresh)
        #detectionsList = detectionsAnalayzer(rawDetections, depth)
        frame = zedFrame(image, depth, camOrientation, camPosition, FPS)
        zedFramesQueue.put(frame)
        #image = cvDrawBoxes(detectionsList, image)
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #image = cv2.resize(image,(1280,720),interpolation=cv2.INTER_LINEAR)
    cam.close()
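zedFrame, rotate and the Darknet detection helpers come from the host
application. The frame container could be as simple as this hypothetical
dataclass, matching the constructor call above:

from dataclasses import dataclass
import numpy as np

@dataclass
class zedFrame:
    image: np.ndarray          # rotated left image
    depth: np.ndarray          # XYZRGBA point cloud data
    camOrientation: np.ndarray # rotation vector in degrees
    camPosition: tuple         # (tx, ty, tz) in meters
    FPS: float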
Example #15

GPIO.add_event_detect(pin, GPIO.RISING, callback=cb, bouncetime=200)
print("GPIO setup finished")

## Set up the ZED camera
init = sl.InitParameters(
    camera_resolution=sl.RESOLUTION.HD720,
    coordinate_units=sl.UNIT.METER,
    coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)
zed = sl.Camera()
status = zed.open(init)
if status != sl.ERROR_CODE.SUCCESS:
    print(repr(status))
    exit()

tracking_params = sl.PositionalTrackingParameters(_enable_pose_smoothing=False,
                                                  _set_floor_as_origin=False,
                                                  _enable_imu_fusion=False)
tracking_params.area_file_path = "nsh_chair.area"  #"smith.area"
zed.enable_positional_tracking(tracking_params)

runtime = sl.RuntimeParameters()
camera_pose = sl.Pose()

camera_info = zed.get_camera_information()

py_translation = sl.Translation()
py_orientation = sl.Orientation()
pose_data = sl.Transform()
sensors_data = sl.SensorsData()

text_translation = ""
Example #16
# Create a ZED camera object
zed = sl.Camera()

# Set configuration parameters
init_params = sl.InitParameters()
init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP # Use a right-handed Y-up coordinate system
init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

# Open the camera
err = zed.open(init_params)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)

# Enable positional tracking with default parameters
tracking_parameters = sl.PositionalTrackingParameters()
err = zed.enable_positional_tracking(tracking_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)

# Enable spatial mapping
mapping_parameters = sl.SpatialMappingParameters()
err = zed.enable_spatial_mapping(mapping_parameters)
if err != sl.ERROR_CODE.SUCCESS:
    exit(1)


# Grab data during 3000 frames
i = 0
py_mesh = sl.Mesh()  # Create a Mesh object
runtime_parameters = sl.RuntimeParameters()
Example #17

def main():
    cam = sl.Camera()
    init = sl.InitParameters()
    init.depth_mode = sl.DEPTH_MODE.ULTRA
    init.coordinate_units = sl.UNIT.METER
    init.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    
    if len(sys.argv) == 2:
        filepath = sys.argv[1]
        print("Reading SVO file: {0}".format(filepath))
        init.set_from_svo_file(filepath)

    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD
    runtime.measure3D_reference_frame = sl.REFERENCE_FRAME.WORLD
    spatial = sl.SpatialMappingParameters()

    transform = sl.Transform()
    tracking = sl.PositionalTrackingParameters(transform)
    cam.enable_positional_tracking(tracking)

    pymesh = sl.Mesh()
    pyplane = sl.Plane()
    reset_tracking_floor_frame = sl.Transform()
    found = 0
    print("Processing...")
    i = 0

    calibration_params = cam.get_camera_information().calibration_parameters
    # Focal length of the left eye in pixels
    focal_left_x = calibration_params.left_cam.fx
    focal_left_y = calibration_params.left_cam.fy
    # First radial distortion coefficient
    # k1 = calibration_params.left_cam.disto[0]
    # Translation between left and right eye on z-axis
    # tz = calibration_params.T.z
    # Horizontal field of view of the left eye in degrees
    # h_fov = calibration_params.left_cam.h_fov
    print("fx", focal_left_x)
    print("fy", focal_left_y)
    print("cx", calibration_params.left_cam.cx)
    print("cy", calibration_params.left_cam.cy)
    print("distortion", calibration_params.left_cam.disto)

    

    cloud = sl.Mat()
    while i < 1000:
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            err = cam.find_floor_plane(pyplane, reset_tracking_floor_frame)
            if err == sl.ERROR_CODE.SUCCESS:
                # if i > 200 and err == sl.ERROR_CODE.SUCCESS:
                found = 1
                print('Floor found!')
                pymesh = pyplane.extract_mesh()
                # get ground plane info
                print("floor normal", pyplane.get_normal())  # normal of the plane
                print("floor equation", pyplane.get_plane_equation())
                print("plane center", pyplane.get_center())
                print("plane pose", pyplane.get_pose())

                # camera_pose = sl.Pose()
                # py_translation = sl.Translation()
                # tracking_state = cam.get_position(camera_pose)
                # # if tracking_state == sl.POSITIONAL_TRACKING_STATE.OK:
                # rotation = camera_pose.get_rotation_vector()
                # rx = round(rotation[0], 2)
                # ry = round(rotation[1], 2)
                # rz = round(rotation[2], 2)

                # translation = camera_pose.get_translation(py_translation)
                # tx = round(translation.get()[0], 2)
                # ty = round(translation.get()[1], 2)
                # tz = round(translation.get()[2], 2)

                # text_translation = str((tx, ty, tz))
                # text_rotation = str((rx, ry, rz))
                # pose_data = camera_pose.pose_data(sl.Transform())
                # print("translation",text_translation)
                # print("rotation",text_rotation)

                cam.retrieve_measure(cloud, sl.MEASURE.XYZRGBA)

                break

            i += 1

    if found == 0:
        print('Floor was not found, please try with another SVO.')
        cam.close()
        exit(0)

    # load image
    path = "/home/hcl/Documents/ZED/pics/Explorer_HD1080_SN14932_16-24-06.png"
    im = cv2.imread(path)
    h, w = im.shape[0], im.shape[1]
    print("original image shape", h, w)
    im = im[:, :int(w / 2), :]  # get the left image of the side-by-side pair
    # im = cv2.resize(im,(int(im.shape[1]/2),int(im.shape[0]/2)))


    # warped (rotated) output image
    warped = np.zeros((h, w, 3))

    roll = -80
    roll *= np.pi / 180

    # Rotation matrix around the x-axis by the roll angle
    Rx = np.matrix([
        [1, 0, 0],
        [0, np.cos(roll), -np.sin(roll)],
        [0, np.sin(roll), np.cos(roll)]
    ])

    #point cloud
    # i = 500
    # j = 355
    if False:  # disabled experiment: warp the point cloud by Rx point-by-point
        point_cloud = sl.Mat()
        cam.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
        for i in range(100):
            for j in range(1000):        
                point3D = point_cloud.get_value(i,j)
                # x = point3D[0]
                # y = point3D[1]
                # z = point3D[2]
                color = point3D[1][3]
                if ~np.isnan(color) and ~np.isinf(color):
                    xyz = point3D[1][:3].reshape((-1,1))
                    # xyz_hom = np.vstack((xyz,1))
                    print('point3D',point3D)
                    warped_index = Rx @ xyz  # need homogeneous coordinates? divide by last column?
                    print('warped_index',warped_index)
                    RGB = im[j, i, :]
                    RGB = np.dstack(([255], [255], [255]))  # debug: override with white
                    print('RGB',RGB)
                    warped[int(np.round(warped_index[0]+10)),int(np.round(warped_index[1])),:] = RGB

    cam.disable_positional_tracking()
    # save_mesh(pymesh)
    cam.close()
    print("\nFINISH")