示例#1
0
def init_params(menu, cam_id=0):
    """Build a ZED camera handle together with its reusable SDK containers.

    Args:
        menu: EasyDict-like configuration (camera resolution, depth mode,
            units, coordinate system, depth range).
        cam_id: numeric id of the camera to open (default 0).

    Returns:
        EasyDict with ``.cam`` (sl.Camera), ``.mat`` (reusable matrices /
        sensor containers) and ``.param`` (init / runtime / tracking
        parameter objects).

    Side effects:
        Rebinds the module-level ``save_dir`` from ``save_dir_fmt``.
    """
    global save_dir

    zed = EasyDict({})
    zed.cam = sl.Camera()

    # Containers refilled on every grab; allocated once up front.
    mats = EasyDict({})
    mats.pose = sl.Pose()
    mats.translation = sl.Translation()
    mats.transform = sl.Transform()
    mats.image = sl.Mat()            # image_map
    mats.depth = sl.Mat()            # depth_map
    mats.point_cloud = sl.Mat()
    mats.sensors = sl.SensorsData()  # sensors_data
    zed.mat = mats

    # Translate the menu selections into SDK parameter objects.
    init_p = sl.InitParameters(
        camera_resolution=mode.resolution[menu.cam.resolution],
        depth_mode=mode.depth[menu.mode.depth],
        coordinate_units=mode.unit[menu.unit],
        coordinate_system=mode.coordinate_system[menu.coordinate_system],
        depth_minimum_distance=menu.depth_range.min,
        depth_maximum_distance=menu.depth_range.max,
        sdk_verbose=verbose)
    runtime_p = sl.RuntimeParameters(
        sensing_mode=mode.sensing[menu.mode.sensing])
    tracking_p = sl.PositionalTrackingParameters(zed.mat.transform)
    zed.param = EasyDict({
        'init': init_p,
        'runtime': runtime_p,
        'tracking': tracking_p
    })

    # Bind the parameters to the requested camera and derive its save dir.
    zed.param.init.set_from_camera_id(cam_id)
    save_dir = save_dir_fmt.format(cam_id)
    return zed
示例#2
0
def capture_data(zed, height, width, length, no_cajas, no_cajas_base, no_cajas_alto):
    """Grab one RGB / depth / point-cloud / sensor snapshot from ``zed``
    and persist everything, plus the user-entered box dimensions, into a
    fresh timestamped subdirectory under ``data/``.
    """
    print('\n##################################################')
    print("#Capturando data, no mover la cámara ni el objeto#")
    print('##################################################')

    # Pull the four data products for the current frame.
    rgb_mat = sl.Mat()
    zed.retrieve_image(rgb_mat, sl.VIEW.LEFT)

    depth_mat = sl.Mat()
    zed.retrieve_measure(depth_mat, sl.MEASURE.DEPTH)

    cloud_mat = sl.Mat()
    zed.retrieve_measure(cloud_mat, sl.MEASURE.XYZRGBA)

    sensors = sl.SensorsData()
    zed.get_sensors_data(sensors, sl.TIME_REFERENCE.IMAGE)

    # One subdirectory per capture, named by timestamp.
    ts = get_timestamp()
    subdir = os.path.join('data', ts)
    os.makedirs(subdir, exist_ok=True)

    # Persist every product next to the measured dimensions.
    save_rgb(subdir, rgb_mat)
    save_depth(subdir, depth_mat)
    save_pointcloud(subdir, cloud_mat)
    save_sensors_data(subdir, sensors)
    save_dimensions_data(subdir, height, width, length,
                         no_cajas, no_cajas_base, no_cajas_alto)

    print('Listo para capturar siguiente objeto')
示例#3
0
def main():
    """Open a ZED camera with depth disabled and print 100 fresh IMU
    samples (plus magnetometer / barometer readings when they update).

    Exits with status 1 when the camera cannot be opened or when the model
    is a first-generation ZED, which carries no additional sensors.
    """
    # Create a Camera object
    zed = sl.Camera()

    init_params = sl.InitParameters()
    init_params.depth_mode = sl.DEPTH_MODE.NONE  # sensors only; skip depth

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    # Get camera information
    info = zed.get_camera_information()

    cam_model = info.camera_model
    if cam_model == sl.MODEL.ZED:
        print(
            "This tutorial only supports ZED-M and ZED2 camera models, ZED does not have additional sensors"
        )
        exit(1)

    # Display camera information (model, S/N, fw version)
    print("Camera Model: " + str(cam_model))
    print("Serial Number: " + str(info.serial_number))
    print("Camera Firmware: " +
          str(info.camera_configuration.firmware_version))
    print("Sensors Firmware: " +
          str(info.sensors_configuration.firmware_version))

    # Display sensors parameters (imu, barometer, magnetometer)
    printSensorParameters(info.sensors_configuration.accelerometer_parameters
                          )  # accelerometer configuration
    printSensorParameters(info.sensors_configuration.gyroscope_parameters
                          )  # gyroscope configuration
    printSensorParameters(info.sensors_configuration.magnetometer_parameters
                          )  # magnetometer configuration
    printSensorParameters(info.sensors_configuration.barometer_parameters
                          )  # barometer configuration

    # Tracks the last-seen timestamp so we only report genuinely new data.
    ts_handler = TimestampHandler()

    # Collect 100 fresh IMU samples. Sensors run in their own internal
    # capture thread (no grab() needed) and at different rates, so we poll
    # as fast as possible and rely on timestamps to detect new samples.
    i = 0
    sensors_data = sl.SensorsData()

    while i < 100:
        if zed.get_sensors_data(
                sensors_data,
                sl.TIME_REFERENCE.CURRENT) == sl.ERROR_CODE.SUCCESS:
            # IMU is the sensor with the highest rate: it paces the loop.
            if ts_handler.is_new(sensors_data.get_imu_data()):
                print("Sample " + str(i))

                print(" - IMU:")
                # Filtered orientation quaternion
                quaternion = sensors_data.get_imu_data().get_pose(
                ).get_orientation().get()
                print(" \t Orientation: [ Ox: {0}, Oy: {1}, Oz {2}, Ow: {3} ]".
                      format(quaternion[0], quaternion[1], quaternion[2],
                             quaternion[3]))

                # linear acceleration
                linear_acceleration = sensors_data.get_imu_data(
                ).get_linear_acceleration()
                print(" \t Acceleration: [ {0} {1} {2} ] [m/sec^2]".format(
                    linear_acceleration[0], linear_acceleration[1],
                    linear_acceleration[2]))

                # angular velocities
                angular_velocity = sensors_data.get_imu_data(
                ).get_angular_velocity()
                print(
                    " \t Angular Velocities: [ {0} {1} {2} ] [deg/sec]".format(
                        angular_velocity[0], angular_velocity[1],
                        angular_velocity[2]))

                # Magnetometer updates at a lower rate than the IMU
                if ts_handler.is_new(sensors_data.get_magnetometer_data()):
                    magnetic_field_calibrated = sensors_data.get_magnetometer_data(
                    ).get_magnetic_field_calibrated()
                    print(
                        " - Magnetometer\n \t Magnetic Field: [ {0} {1} {2} ] [uT]"
                        .format(magnetic_field_calibrated[0],
                                magnetic_field_calibrated[1],
                                magnetic_field_calibrated[2]))

                # Barometer also has its own rate.
                # BUG FIX: the original stored the pressure into a variable
                # misleadingly named `magnetic_field_calibrated` and never
                # used it; the value is now read directly in the print.
                if ts_handler.is_new(sensors_data.get_barometer_data()):
                    print(" - Barometer\n \t Atmospheric pressure: {0} [hPa]".
                          format(sensors_data.get_barometer_data().pressure))

                i = i + 1

    zed.close()
    return 0
示例#4
0
def main():
    """Track the camera pose for 1000 frames, printing translation,
    orientation and raw IMU readings for every grabbed frame."""
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    py_transform = sl.Transform(
    )  # First create a Transform object for TrackingParameters object
    tracking_parameters = sl.PositionalTrackingParameters(
        init_pos=py_transform)
    err = zed.enable_positional_tracking(tracking_parameters)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = sl.Pose()

    zed_sensors = sl.SensorsData()
    runtime_parameters = sl.RuntimeParameters()

    # NOTE: `i` is only incremented on a successful grab, so this loop runs
    # until 1000 frames have actually been grabbed.
    while i < 1000:
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose, sl.REFERENCE_FRAME.WORLD)
            zed.get_sensors_data(zed_sensors, sl.TIME_REFERENCE.IMAGE)
            zed_imu = zed_sensors.get_imu_data()

            # Display the translation and timestamp
            py_translation = sl.Translation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp.get_milliseconds()))

            # Display the orientation quaternion
            py_orientation = sl.Orientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            # Display the IMU acceleration (filled in place into the list)
            acceleration = [0, 0, 0]
            zed_imu.get_linear_acceleration(acceleration)
            ax = round(acceleration[0], 3)
            ay = round(acceleration[1], 3)
            az = round(acceleration[2], 3)
            print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
                ax, ay, az))

            # Display the IMU angular velocity (filled in place)
            a_velocity = [0, 0, 0]
            zed_imu.get_angular_velocity(a_velocity)
            vx = round(a_velocity[0], 3)
            vy = round(a_velocity[1], 3)
            vz = round(a_velocity[2], 3)
            print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
                vx, vy, vz))

            # Display the IMU orientation quaternion
            zed_imu_pose = sl.Transform()
            ox = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], 3)
            oy = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], 3)
            oz = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], 3)
            ow = round(
                zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], 3)
            print(
                "IMU Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                    ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
def main():
    """Enumerate every connected ZED camera, open each one with positional
    tracking enabled, write per-camera intrinsics/distortion CSVs, then run
    the interactive key-input capture loop and close all cameras.

    Exits with status 1 as soon as any camera fails to open or fails to
    enable positional tracking.
    """
    # global stop_signal
    # signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    # Per-camera parallel lists; entry N of every list belongs to camera N.
    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []  # BUG FIX: this list was initialized twice
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []

    # Set configuration parameters shared by all cameras
    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,#■
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object and its per-camera containers
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            exit(1)
        # Enable positional tracking for this camera
        py_transform = cams.transform_list[index]
        print("PositionalTrackingParameters start")
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        print("PositionalTrackingParameters end")
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        runtime = sl.RuntimeParameters()
        cams.runtime_list.append(runtime)
        index = index + 1

    #Start camera threads
    # for index in range(0, len(cams.zed_list)):
    #     if cams.zed_list[index].is_opened():
    #         thread_list.append(threading.Thread(target=grab_run, args=(cams,index,)))
    #         thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        fd_cam = f'{basePath}/{cams.name_list[index]}'
        os.makedirs(fd_cam, exist_ok=True)
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        # NOTE(review): true division yields float dimensions; the SDK
        # appears to accept them here, but // would be safer — confirm.
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2  # Declare your sl.Mat matrices
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)

        # Dump camera calibration next to the capture data
        cam_intr, distortion = get_camera_intrintic_info(cams.zed_list[index])
        filename = f'{fd_cam}/camera-intrinsics.csv'
        np.savetxt(filename, cam_intr)
        filename = f'{fd_cam}/camera-distortion.csv'
        np.savetxt(filename, distortion)

    #*******************************************************************
    take_by_keyinput(cameras, cams)
    # take_by_keyinput_camera_view(cameras, cams)
    #*******************************************************************
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
示例#6
0
def main():
    """Run the live capture GUI: stream side-by-side RGB/depth previews plus
    IMU readings, and save a full capture (RGB, depth, point cloud, sensors,
    dimensions) whenever the user presses "Capture".

    Exits with status 1 when the camera cannot be opened; otherwise loops
    until the window is closed or 'q' (113) is pressed.
    """
    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    input_type = sl.InputType()
    init = sl.InitParameters(input_t=input_type)
    init.camera_resolution = sl.RESOLUTION.HD1080
    #init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER
    #init.coordinate_units = sl.UNIT.METER

    # Open the camera
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    # Display help in console
    print_help()

    # Creating GUI
    gui = ZED_GUI()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.STANDARD

    # Prepare new image size to retrieve half-resolution images
    image_size = zed.get_camera_information().camera_resolution
    image_size.width = image_size.width /2
    image_size.height = image_size.height /2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    point_cloud = sl.Mat()

    sensors_data = sl.SensorsData()

    key = ' '
    event = ' '
    while key != 113:  # loop until 'q'
        err = zed.grab(runtime)

        event, values = gui.window.read(timeout=20)
        if event == "Exit" or event == sg.WIN_CLOSED:
            break

        if event == "Capture":
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt / SystemExit; narrowed to Exception.
            try:
                height = float(values["-HEIGHT-"])
                width  = float(values["-WIDTH-"])
                length = float(values["-LENGTH-"])
                no_cajas = float(values["-NO_CAJAS-"])
                no_cajas_base = float(values["-NO_CAJAS_BASE-"])
                no_cajas_alto = float(values["-NO_CAJAS_ALTO-"])

                sg.popup_timed('Capturando data, no mover la cámara ni el objeto', title='Capturando data', auto_close_duration=5, non_blocking=True)

                capture_data(zed, height, width, length, no_cajas, no_cajas_base, no_cajas_alto)

                # Reset the form for the next object
                gui.window["-HEIGHT-"].update('')
                gui.window["-WIDTH-"].update('')
                gui.window["-LENGTH-"].update('')
                gui.window["-NO_CAJAS-"].update('1')
                gui.window["-NO_CAJAS_BASE-"].update('1')
                gui.window["-NO_CAJAS_ALTO-"].update('1')

                sg.popup_timed('Captura guardada correctamente. Listo para continuar', title='Captura exitosa', auto_close_duration=5)
            except Exception:
                sg.popup_timed('Debe ingresar valor numérico en los tres campos. Decimales con punto.', title='Valores incorrectos', auto_close_duration=5)

        if err == sl.ERROR_CODE.SUCCESS:
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            zed.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            # capture sensors data
            zed.get_sensors_data(sensors_data, sl.TIME_REFERENCE.IMAGE)

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            # concatenate both images to show them side by side in the GUI
            sbs_image = np.concatenate((image_ocv, depth_image_ocv), axis=1)
            imgbytes = cv2.imencode(".png", sbs_image)[1].tobytes()
            gui.window["-IMAGE-"].update(data=imgbytes)

            # show sensors data
            quaternion, linear_acceleration, angular_velocity, magnetic_field_calibrated, atmospheric_pressure =\
                get_data_from_sensors(sensors_data)

            gui.window["-IMU_ORIENTATION-"].update(quaternion)
            gui.window["-IMU_ACCELERATION-"].update(linear_acceleration)
            gui.window["-IMU_ANG_VEL-"].update(angular_velocity)
            gui.window["-IMU_MAG_FIELD-"].update(magnetic_field_calibrated)
            gui.window["-IMU_ATM_PRESS-"].update(atmospheric_pressure)

            key = cv2.waitKey(10)

    cv2.destroyAllWindows()
    zed.close()
    gui.window.close()

    print("\nFINISH")
# Configure positional tracking: pose smoothing, floor-as-origin and IMU
# fusion all disabled; relocalization map loaded from an .area file.
# NOTE(review): `zed` must already be an opened sl.Camera earlier in the
# script — its creation is not visible in this excerpt; confirm.
tracking_params = sl.PositionalTrackingParameters(_enable_pose_smoothing=False,
                                                  _set_floor_as_origin=False,
                                                  _enable_imu_fusion=False)
tracking_params.area_file_path = "nsh_chair.area"  #"smith.area"
zed.enable_positional_tracking(tracking_params)

runtime = sl.RuntimeParameters()
camera_pose = sl.Pose()

camera_info = zed.get_camera_information()

# Reusable SDK containers for the tracking loop below.
py_translation = sl.Translation()
py_orientation = sl.Orientation()
pose_data = sl.Transform()
sensors_data = sl.SensorsData()

text_translation = ""

# Terminal dashboard layout (dashing): two log panes stacked above two
# horizontal gauges for tracker/mapper accuracy.
ui = HSplit(
    VSplit(
        Log("Timestamp", color=3, border_color=5),
        Log("OLA Data", color=3, border_color=5),
    ),
    VSplit(
        HGauge(val=0, title="tracker accuracy", border_color=5),
        HGauge(val=0, title="mapper accuracy", border_color=5),
    ))

# Flag polled by the display loop elsewhere in the script.
run_dashing_display = True
def main():
    """Open every connected ZED camera with positional tracking, start one
    grab thread per camera, dump intrinsics, capture one dataset per camera,
    then signal the threads to stop and close all cameras.

    Exits with status 1 when opening a camera or enabling tracking fails.
    """
    global stop_signal
    thread_list = []
    signal.signal(signal.SIGINT, signal_handler)
    # List and open cameras
    cameras = sl.Camera.get_device_list()
    index = 0
    cams = EasyDict({})

    # Per-camera parallel lists; entry N of every list belongs to camera N.
    cams.pose_list = []
    cams.zed_sensors_list = []
    cams.zed_list = []
    cams.left_list = []
    cams.depth_list = []
    cams.pointcloud_list = []
    cams.timestamp_list = []
    cams.image_size_list = []
    cams.image_zed_list = []
    cams.depth_image_zed_list = []
    cams.name_list = []  # BUG FIX: this list was initialized twice
    cams.py_translation_list = []
    cams.py_orientation_list = []
    cams.transform_list = []
    cams.runtime_list = []

    # Set configuration parameters shared by all cameras
    init = sl.InitParameters(
        camera_resolution=sl.RESOLUTION.HD2K,
        coordinate_units=sl.UNIT.METER,
        #coordinate_units=sl.UNIT.MILLIMETER,
        depth_mode=sl.DEPTH_MODE.PERFORMANCE,
        coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP)

    for cam in cameras:
        init.set_from_serial_number(cam.serial_number)
        cams.name_list.append("ZED_{}".format(cam.serial_number))
        print("Opening {}".format(cams.name_list[index]))
        # Create a ZED camera object and its per-camera containers
        cams.zed_list.append(sl.Camera())
        cams.left_list.append(sl.Mat())
        cams.depth_list.append(sl.Mat())
        cams.pointcloud_list.append(sl.Mat())
        cams.pose_list.append(sl.Pose())
        cams.zed_sensors_list.append(sl.SensorsData())
        cams.timestamp_list.append(0)
        cams.py_translation_list.append(sl.Translation())
        cams.transform_list.append(sl.Transform())
        cams.py_orientation_list.append(sl.Orientation())

        # Open the camera
        status = cams.zed_list[index].open(init)
        if status != sl.ERROR_CODE.SUCCESS:
            print(repr(status))
            cams.zed_list[index].close()
            # BUG FIX: the original fell through after closing the failed
            # camera and tried to enable tracking on it; exit instead
            # (matches the sibling capture script's behavior).
            exit(1)
        # Enable positional tracking for this camera
        py_transform = cams.transform_list[index]
        tracking_parameters = sl.PositionalTrackingParameters(
            init_pos=py_transform)
        err = cams.zed_list[index].enable_positional_tracking(
            tracking_parameters)
        if err != sl.ERROR_CODE.SUCCESS:
            cams.zed_list[index].close()
            exit(1)
        index = index + 1

    #Start camera threads
    for index in range(0, len(cams.zed_list)):
        if cams.zed_list[index].is_opened():
            thread_list.append(
                threading.Thread(target=grab_run, args=(
                    cams,
                    index,
                )))
            thread_list[index].start()

    #https://github.com/stereolabs/zed-examples/blob/master/tutorials/tutorial%204%20-%20positional%20tracking/python/positional_tracking.py

    # py_translation = sl.Translation()
    # Display help in console
    print_help()

    # Prepare new image size to retrieve half-resolution images
    for index, cam in enumerate(cameras):
        image_size = cams.zed_list[index].get_camera_information(
        ).camera_resolution
        # NOTE(review): true division yields float dimensions; the SDK
        # appears to accept them here, but // would be safer — confirm.
        image_size.width = image_size.width / 2
        image_size.height = image_size.height / 2  # Declare your sl.Mat matrices
        image_zed = sl.Mat(image_size.width, image_size.height,
                           sl.MAT_TYPE.U8_C4)
        depth_image_zed = sl.Mat(image_size.width, image_size.height,
                                 sl.MAT_TYPE.U8_C4)
        cams.image_size_list.append(image_size)
        cams.image_zed_list.append(image_zed)
        cams.depth_image_zed_list.append(depth_image_zed)

        # Dump camera intrinsics as a space-separated text file
        cam_intr = get_camera_intrintic_info(cams.zed_list[index])
        filename = path + cams.name_list[index] + "-camera-intrinsics.txt"
        df = pd.DataFrame(cam_intr)
        df.to_csv(filename, sep=' ', header=None, index=None)

    #*******************************************************************

    index = 0
    take_cam_id = 0
    for cam in cameras:
        get_cam_data(cams, index, take_cam_id)
        index += 1

    stop_signal = True
    #*******************************************************************
    '''
    key = ' '
    take_cam_id=0
    while key != 113 :

        index=0
        image_ocv_cat=None
        depth_image_ocv_cat=None
        for cam in cameras:
            image_ocv, depth_image_ocv=get_cam_color_depth(cams,index)
            if image_ocv_cat is None:
                image_ocv_cat=image_ocv
                depth_image_ocv_cat=depth_image_ocv
            else:
                image_ocv_cat=np.hstack([image_ocv_cat,image_ocv])
                depth_image_ocv_cat=np.hstack([depth_image_ocv_cat,depth_image_ocv])
            index+=1

        cv2.imshow("Image", image_ocv_cat)
        cv2.imshow("Depth", depth_image_ocv_cat)

        key = cv2.waitKey(10)
        if key == 114 or  key == 82:#R
            index = 0
            for cam in cameras:
                # process_key_event(cams,key,index)
                get_cam_data(cams, index,take_cam_id)
                index+=1
            take_cam_id=take_cam_id+1

    cv2.destroyAllWindows()
    '''
    index = 0
    for cam in cameras:
        cams.zed_list[index].close()
        index += 1
    print("\nFINISH")
示例#9
0
def main():
    """Record an SVO file while previewing RGB/depth and logging IMU samples
    to a CSV placed next to the recording.

    Usage: the single command-line argument is the output SVO filename,
    saved under ``svo_recordings/``. Loops until 'q' (113) is pressed.
    """
    # Exactly one argument expected: the output SVO file name.
    if len(sys.argv) != 2:
        print("Only the path of the output SVO file should be passed as argument.")
        exit(1)
    path = 'svo_recordings/'
    os.makedirs(path, exist_ok=True)  # directory may already exist

    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init.coordinate_units = sl.UNIT.MILLIMETER

    # BUG FIX: the original used `cam` without ever creating it; create the
    # camera object before opening it (as in the upstream SVO example).
    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    path_output = path + sys.argv[1]
    recording_param = sl.RecordingParameters(path_output, sl.SVO_COMPRESSION_MODE.H264)
    err = cam.enable_recording(recording_param)
    if err != sl.ERROR_CODE.SUCCESS:
        # BUG FIX: the original printed repr(status) (the open() result)
        # here instead of the recording error.
        print(repr(err))
        exit(1)

    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.FILL
    print("SVO is Recording, use Ctrl-C to stop.")
    frames_recorded = 0

    # Declare your sl.Mat matrices at half resolution for the preview
    image_size = cam.get_camera_information().camera_resolution
    image_size.width = image_size.width /2
    image_size.height = image_size.height /2
    image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)
    depth_image_zed = sl.Mat(image_size.width, image_size.height, sl.MAT_TYPE.U8_C4)

    # Used to store the sensors timestamp to know if a sample is new
    ts_handler = TimestampHandler()
    sensors_data = sl.SensorsData()
    rows_list = []  # one dict per fresh IMU sample; becomes the CSV

    key = ' '
    while key != 113:  # loop until 'q'
        if cam.grab(runtime) == sl.ERROR_CODE.SUCCESS:
            frames_recorded += 1

            # Sensors run in their own capture thread at their own rates;
            # poll and use timestamps to detect genuinely new samples.
            if cam.get_sensors_data(sensors_data, sl.TIME_REFERENCE.CURRENT) == sl.ERROR_CODE.SUCCESS:
                # IMU is the sensor with the highest rate
                if ts_handler.is_new(sensors_data.get_imu_data()):
                    print("Sample " + str(frames_recorded))

                    print(" - IMU:")
                    # Filtered orientation quaternion
                    quaternion = sensors_data.get_imu_data().get_pose().get_orientation().get()
                    print(" \t Orientation: [ Ox: {0}, Oy: {1}, Oz {2}, Ow: {3} ]".format(quaternion[0], quaternion[1], quaternion[2], quaternion[3]))

                    # linear acceleration
                    linear_acceleration = sensors_data.get_imu_data().get_linear_acceleration()
                    print(" \t Acceleration: [ {0} {1} {2} ] [m/sec^2]".format(linear_acceleration[0], linear_acceleration[1], linear_acceleration[2]))

                    # angular velocities
                    angular_velocity = sensors_data.get_imu_data().get_angular_velocity()
                    print(" \t Angular Velocities: [ {0} {1} {2} ] [deg/sec]".format(angular_velocity[0], angular_velocity[1], angular_velocity[2]))

                    dict1 = {'frame':frames_recorded, 'time':sensors_data.get_imu_data().timestamp.get_microseconds(),
                    'Ox':quaternion[0] ,'Oy':quaternion[1], 'Oz':quaternion[2], 'Ow':quaternion[3],
                    'Ax':linear_acceleration[0], 'Ay':linear_acceleration[1], 'Az':linear_acceleration[2],
                    'AVx':angular_velocity[0], 'AVy':angular_velocity[1], 'AVz':angular_velocity[2]}
                    rows_list.append(dict1)

            cam.retrieve_image(image_zed, sl.VIEW.LEFT, sl.MEM.CPU, image_size)
            cam.retrieve_image(depth_image_zed, sl.VIEW.DEPTH, sl.MEM.CPU, image_size)
            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            cv2.imshow("Image", image_ocv)
            cv2.imshow("Depth", depth_image_ocv)

            key = cv2.waitKey(1)
            print("Frame count: " + str(frames_recorded), end="\r")

    # Write the IMU log next to the SVO (assumes a 4-char ".svo" suffix).
    df = pd.DataFrame(rows_list)
    df.to_csv(path_output[:-4]+'.csv')

    cv2.destroyAllWindows()
    cam.disable_recording()
    cam.close()

    print("\nFINISH")
示例#10
0
    def _poll(self):
        """Grab one frame from the ZED and refresh the cached sensor outputs.

        Depending on the enable_* flags, updates:
        - ``self.color_image``: BGR uint8 image (alpha dropped, RGB reversed)
        - ``self.depth_image``: depth map aligned on the left RGB image
        - ``self.point_cloud``: XYZRGBA colored point cloud
        - ``self.imu_quaternion`` / ``self.linear_acceleration`` /
          ``self.angular_velocity`` / ``self.magnetic_field`` /
          ``self.barometer_pressure``: latest IMU / magnetometer / barometer data
        Also advances the frame_time / frame_count bookkeeping.
        """
        last_time = self.frame_time
        self.frame_time = time.time() - self.start_time
        self.frame_count += 1

        # Pre-declare outputs: self.color_image / self.depth_image are
        # assigned unconditionally below, which raised UnboundLocalError
        # whenever enable_rgb or enable_depth was False.
        color_image = None
        depth_image = None

        #
        # get the frames
        #
        runtime_parameters = sl.RuntimeParameters()

        if self.enable_rgb:
            image = sl.Mat()

        if self.enable_depth:
            depth = sl.Mat()
            point_cloud = sl.Mat()

        # Grab an image, a RuntimeParameters object must be given to grab()
        if self.zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # A new image is available if grab() returns ERROR_CODE.SUCCESS

            if self.enable_rgb:
                # Get the left image
                self.zed.retrieve_image(image, sl.VIEW.LEFT)

                # Get an np array from ZED Matrix
                rgba_np_image = image.get_data()

                # Drop the alpha channel
                rgb_np_image = rgba_np_image[:, :, :-1]

                # Convert rgb to bgr
                color_image = rgb_np_image[:, :, ::-1]

                if self.verbose:
                    # Get the image timestamp
                    timestamp = self.zed.get_timestamp(sl.TIME_REFERENCE.IMAGE)
                    print(
                        "Image resolution: {0} x {1} || Image timestamp: {2}\n"
                        .format(image.get_width(), image.get_height(),
                                timestamp.get_milliseconds()),
                        end="\r")

            if self.enable_depth:
                # Retrieve depth matrix. Depth is aligned on the left RGB image
                self.zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
                depth_image = depth.get_data()

                # Retrieve colored point cloud
                self.zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
                self.point_cloud = point_cloud.get_data()

                if self.verbose and depth_image is not None:
                    # shape is (rows, cols); report the center pixel.
                    y, x = depth_image.shape
                    x = x // 2
                    y = y // 2
                    # NumPy indexing is [row, col] == [y, x]. The original
                    # indexed [x, y], which is wrong (or out of bounds) for
                    # non-square images.
                    depth_value = depth_image[y, x]
                    print("Distance to Camera at ({0}, {1}): {2} mm".format(
                        x, y, depth_value),
                          end="\r")

            if self.resize:
                import cv2
                if self.width != WIDTH or self.height != HEIGHT:
                    # interpolation must be passed by keyword: the third
                    # positional argument of cv2.resize is `dst`, so the
                    # original call silently ignored INTER_NEAREST.
                    color_image = cv2.resize(
                        color_image, (self.width, self.height),
                        interpolation=cv2.INTER_NEAREST) if self.enable_rgb else None
                    depth_image = cv2.resize(
                        depth_image, (self.width, self.height),
                        interpolation=cv2.INTER_NEAREST) if self.enable_depth else None
                if self.channels != CHANNELS:
                    # Fixed typo: cv2 has COLOR_BGR2GRAY, not COLOR_BRG2GRAY
                    # (the original raised AttributeError on this path).
                    color_image = cv2.cvtColor(
                        color_image,
                        cv2.COLOR_BGR2GRAY) if self.enable_rgb else None

            self.color_image = color_image
            self.depth_image = depth_image

            if self.enable_imu:
                sensors_data = sl.SensorsData()
                self.zed.get_sensors_data(sensors_data,
                                          sl.TIME_REFERENCE.CURRENT)

                # Check if IMU data has been updated since the last poll
                if self.zed_timestamp_handler.is_new(
                        sensors_data.get_imu_data()):
                    self.imu_quaternion = sensors_data.get_imu_data().get_pose(
                    ).get_orientation().get()
                    self.linear_acceleration = sensors_data.get_imu_data(
                    ).get_linear_acceleration()
                    self.angular_velocity = sensors_data.get_imu_data(
                    ).get_angular_velocity()

                    if self.verbose:
                        print("IMU Orientation: {}".format(
                            self.imu_quaternion))
                        print("IMU Acceleration: {} [m/sec^2]".format(
                            self.linear_acceleration))
                        print("IMU Angular Velocity: {} [deg/sec]".format(
                            self.angular_velocity))

                # Check if Magnetometer data has been updated
                if self.zed_timestamp_handler.is_new(
                        sensors_data.get_magnetometer_data()):
                    self.magnetic_field = sensors_data.get_magnetometer_data(
                    ).get_magnetic_field_calibrated()

                    if self.verbose:
                        print("Magnetometer Magnetic Field: {} [uT]".format(
                            self.magnetic_field))

                # Check if Barometer data has been updated
                if self.zed_timestamp_handler.is_new(
                        sensors_data.get_barometer_data()):
                    self.barometer_pressure = sensors_data.get_barometer_data(
                    ).pressure

                    if self.verbose:
                        print(
                            "Barometer Atmospheric pressure: {} [hPa]".format(
                                self.barometer_pressure))
示例#11
0
def main():
    """Open a ZED camera and stream 800 IMU/magnetometer/barometer samples.

    Requires a ZED-M or ZED2 (the original ZED has no IMU). Prints relative
    timestamps, IMU acceleration / angular velocity / orientation, and — on
    ZED2 — magnetometer, barometer, and temperature readings.

    Returns:
        int: 0 on normal completion; calls exit(1) on open/model failure.
    """
    # Create a Camera object
    zed = sl.Camera()

    # Create a InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.coordinate_system = sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.UNIT.METER

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    cam_model = zed.get_camera_information().camera_model
    if cam_model == sl.MODEL.ZED:
        print("This tutorial only supports ZED-M and ZED2 camera models")
        exit(1)

    # Get Sensor Data for 2 seconds (800 samples)
    i = 0
    data = sl.SensorsData()
    first_ts = sl.Timestamp()
    prev_imu_ts = sl.Timestamp()
    prev_baro_ts = sl.Timestamp()
    prev_mag_ts = sl.Timestamp()
    while i < 800:
        # Get Sensor Data not synced with image frames
        if zed.get_sensors_data(
                data, sl.TIME_REFERENCE.CURRENT) != sl.ERROR_CODE.SUCCESS:
            print("Error retrieving Sensor Data")
            break

        imu_ts = data.get_imu_data().timestamp

        if i == 0:
            first_ts = imu_ts

        # Skip the sample if the IMU timestamp has not advanced (stale data)
        if prev_imu_ts.data_ns == imu_ts.data_ns:
            continue

        prev_imu_ts = imu_ts

        print("*** Sample #" + str(i))

        seconds = data.get_imu_data().timestamp.get_seconds(
        ) - first_ts.get_seconds()
        print(" * Relative timestamp: " + str(seconds) + " sec")

        # Filtered orientation quaternion
        zed_imu = data.get_imu_data()

        # Display the IMU acceleration
        acceleration = [0, 0, 0]
        zed_imu.get_linear_acceleration(acceleration)
        ax = round(acceleration[0], 3)
        ay = round(acceleration[1], 3)
        az = round(acceleration[2], 3)
        print("IMU Acceleration: Ax: {0}, Ay: {1}, Az {2}\n".format(
            ax, ay, az))

        # Display the IMU angular velocity
        a_velocity = [0, 0, 0]
        zed_imu.get_angular_velocity(a_velocity)
        vx = round(a_velocity[0], 3)
        vy = round(a_velocity[1], 3)
        vz = round(a_velocity[2], 3)
        print("IMU Angular Velocity: Vx: {0}, Vy: {1}, Vz {2}\n".format(
            vx, vy, vz))

        # Display the IMU orientation quaternion
        zed_imu_pose = sl.Transform()
        ox = round(
            zed_imu.get_pose(zed_imu_pose).get_orientation().get()[0], 3)
        oy = round(
            zed_imu.get_pose(zed_imu_pose).get_orientation().get()[1], 3)
        oz = round(
            zed_imu.get_pose(zed_imu_pose).get_orientation().get()[2], 3)
        ow = round(
            zed_imu.get_pose(zed_imu_pose).get_orientation().get()[3], 3)
        print("IMU Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
            ox, oy, oz, ow))

        if cam_model == sl.MODEL.ZED2:

            # IMU temperature (-1 means the sensor value is unavailable)
            location = sl.SENSOR_LOCATION.IMU
            temp = data.get_temperature_data().get(location)
            if temp != -1:
                print(" *  IMU temperature: " + str(temp) + "C")

            # Check if Magnetometer Data are updated
            mag_ts = data.get_magnetometer_data().timestamp
            if (prev_mag_ts.data_ns != mag_ts.data_ns):
                prev_mag_ts = mag_ts
                mx = round(data.get_magnetometer_data().
                           get_magnetic_field_calibrated()[0])
                my = round(data.get_magnetometer_data().
                           get_magnetic_field_calibrated()[1])
                mz = round(data.get_magnetometer_data().
                           get_magnetic_field_calibrated()[2])
                print(" * Magnetic Fields [uT]: x: {0}, y: {1}, z: {2}".format(
                    mx, my, mz))

            baro_ts = data.get_barometer_data().timestamp
            if (prev_baro_ts.data_ns != baro_ts.data_ns):
                prev_baro_ts = baro_ts

                # Atmospheric pressure
                print(" * Atmospheric pressure [hPa]: " +
                      str(data.get_barometer_data().pressure))

                # Barometer temperature
                location = sl.SENSOR_LOCATION.BAROMETER
                baro_temp = data.get_temperature_data().get(location)
                if baro_temp != -1:
                    # Fixed copy-paste bug: the original printed the IMU
                    # temperature (`temp`) instead of `baro_temp` here.
                    print(" * Barometer temperature: " + str(baro_temp) + "C")

                # Camera temperatures
                location_left = sl.SENSOR_LOCATION.ONBOARD_LEFT
                location_right = sl.SENSOR_LOCATION.ONBOARD_RIGHT

                left_temp = data.get_temperature_data().get(location_left)
                right_temp = data.get_temperature_data().get(location_right)
                print(" * Camera left temperature: " + str(left_temp))
                print(" * Camera right temperature: " + str(right_temp))

        i = i + 1

    zed.close()
    return 0