# Assumed imports for this snippet: the legacy ZED Python wrapper and PyOpenGL.
# tv (the tracking viewer) and start_zed come from the original positional-tracking sample.
import pyzed.camera as zcam
import pyzed.core as core
import pyzed.defines as sl
import pyzed.types as tp
from OpenGL.GLUT import glutMainLoop


def main():

    init = zcam.PyInitParameters(
        camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_HD720,
        depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
        coordinate_units=sl.PyUNIT.PyUNIT_METER,
        coordinate_system=sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
        sdk_verbose=True)
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

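    # Enable positional tracking with a default PyTransform as the initial pose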
    transform = core.PyTransform()
    tracking_params = zcam.PyTrackingParameters(transform)
    cam.enable_tracking(tracking_params)

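    # Objects passed to the grab/tracking loop below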
    runtime = zcam.PyRuntimeParameters()
    camera_pose = zcam.PyPose()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = core.PyTranslation()

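    # start_zed is defined elsewhere in the original sample (not shown in this snippet)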
    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
Code Example #2
    # __init__ of a camera-positioning worker class (the class definition is not shown);
    # assumes the same pyzed imports as the first snippet plus matplotlib.pyplot as plt.
    def __init__(self, inputQueue, outputQueue, visualize):
        self.inputQueue = inputQueue
        self.outputQueue = outputQueue
        #self.outputQueueImages = outputQueueImages
        self.visualize = visualize
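        # x/y history buffers (filled elsewhere in the class, not shown here)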
        self.xlist = []
        self.ylist = []
        if self.visualize:
            plt.figure(1)
        init = zcam.PyInitParameters(
            camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_VGA,
            depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
            coordinate_units=sl.PyUNIT.PyUNIT_METER,
            coordinate_system=sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
            sdk_verbose=False)
        cam = zcam.PyZEDCamera()
        self.image = core.PyMat()
        status = cam.open(init)
        if status != tp.PyERROR_CODE.PySUCCESS:
            print(repr(status))
            exit()

        transform = core.PyTransform()
        tracking_params = zcam.PyTrackingParameters(transform)
        cam.enable_tracking(tracking_params)

        runtime = zcam.PyRuntimeParameters()
        camera_pose = zcam.PyPose()

        py_translation = core.PyTranslation()
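        # self.start_zed is defined elsewhere in the class (not shown in this snippet)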
        print("Starting ZEDPositioning")
        self.start_zed(cam, runtime, camera_pose, py_translation)
Code Example #3
# Assumes the same legacy pyzed imports as the first snippet above.
def main():
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a PyTransform object for the PyTrackingParameters object
    py_transform = core.PyTransform()
    tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = zcam.PyPose()
    runtime_parameters = zcam.PyRuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose,
                             sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)

            # Display the translation and timestamp
            py_translation = core.PyTranslation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = core.PyOrientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
Code Example #4
    def enable_tracking(self, init_vector=(0.0, 0.0, 0.0), load_area=False):
        # default_tracking_params() is a project helper that is not shown in this snippet
        params = default_tracking_params()
        if load_area is not False:
            params.area_file_path = load_area
        # build the initial world transform before enabling tracking, otherwise
        # init_vector would have no effect on the starting position
        translation = core.PyTranslation()
        translation.init_vector(init_vector[0], init_vector[1], init_vector[2])  # X, Y, Z
        transform = core.PyTransform()
        transform.set_translation(translation)
        params.set_initial_world_transform(transform)
        # sets position equal to coordinates in init_vector
        # enables tracking and updates overall_status at the same time
        self.overall_status = self.camera.enable_tracking(params)
        # TODO TODO TODO self.transformer = rigid_pose_transform((-init_vector[0], -init_vector[1], -init_vector[2]))
        # saves function that transforms poses such that they ignore the init_vector
        self.pose = zcam.PyPose()
        self.tracking_status = 'Enabled'
Code Example #5
# Assumes the pyzed imports from the first snippet plus cv2 (OpenCV) and dronekit's connect();
# MyVehicle, initZed, getPos and getFreq are project helpers that are not shown here.
def main():
    connection_string = '/dev/ttyACM0'
    print("Connecting to vehicle on: %s" % (connection_string))
    vehicle = connect(connection_string,
                      wait_ready=True,
                      vehicle_class=MyVehicle)

    print('zed prep')
    zed, image, zed_pose = initZed(30)
    print('zed open')
    index, time_prev, time_current, time_bias = 0, 0, 0, 0
    key = ' '
    file = open("Data/0_data.txt", "w")
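    # Every third grabbed frame: save the left image and append ZED pose + vehicle telemetry to the log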
    while key != 113:  # loop until 'q' (ASCII code 113) is pressed in the image window
        if zed.grab(zcam.PyRuntimeParameters()) == tp.PyERROR_CODE.PySUCCESS:
            if index % 3 == 0:
                zed.get_position(zed_pose,
                                 sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
                tx, ty, tz = getPos(zed_pose, core.PyTranslation())

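                # Save and display the left image; waitKey(1) also polls the keyboard for the quit key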
                zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
                img = image.get_data()[:, :, 0:3]
                cv2.imshow('dd', img)
                key = cv2.waitKey(1)
                cv2.imwrite('Data/' + str(index) + '.jpg', img)

                time_prev, freq = getFreq(time_prev,
                                          zed.get_camera_timestamp())
                file.write(
                    "%f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f\n"
                    % (1 / freq, tx, ty, tz, vehicle.location._lat,
                       vehicle.location._lon, vehicle.location._relative_alt,
                       vehicle.attitude.roll, vehicle.attitude.pitch,
                       vehicle.attitude.yaw, vehicle._pitchspeed,
                       vehicle._rollspeed, vehicle._yawspeed,
                       vehicle.raw_imu.xacc, vehicle.raw_imu.yacc,
                       vehicle.raw_imu.zacc, vehicle.raw_imu.xgyro,
                       vehicle.raw_imu.ygyro, vehicle.raw_imu.zgyro,
                       vehicle.raw_imu.xmag, vehicle.raw_imu.ymag,
                       vehicle.raw_imu.zmag))
            index += 1

    cv2.destroyAllWindows()
    file.close()
    zed.close()
Code Example #6
    def position(self):
        # Return the current (x, y, z) translation while tracking is OK, otherwise None
        if self._tracking_status == sl.PyTRACKING_STATE.PyTRACKING_STATE_OK:
            return self.pose.get_translation(core.PyTranslation()).get()
        else:
            return None