def main():

    init = zcam.PyInitParameters(
        camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_HD720,
        depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
        coordinate_units=sl.PyUNIT.PyUNIT_METER,
        coordinate_system=sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
        sdk_verbose=True)
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    # Enable positional tracking with an identity initial transform
    transform = core.PyTransform()
    tracking_params = zcam.PyTrackingParameters(transform)
    cam.enable_tracking(tracking_params)

    runtime = zcam.PyRuntimeParameters()
    camera_pose = zcam.PyPose()

    # Viewer used to display the tracked trajectory
    viewer = tv.PyTrackingViewer()
    viewer.init()

    py_translation = core.PyTranslation()

    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
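start_zed() and the viewer class are defined elsewhere in the sample. The sketch below is only an assumption of what the grab loop inside it might look like; the viewer hook used at the end (update_zed_position) is a hypothetical name, not confirmed by this snippet.

def start_zed(cam, runtime, camera_pose, viewer, py_translation):
    # Grab a fixed number of frames and report the current world-frame pose
    for _ in range(1000):
        if cam.grab(runtime) == tp.PyERROR_CODE.PySUCCESS:
            cam.get_position(camera_pose, sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
            translation = camera_pose.get_translation(py_translation).get()
            print("Tx: {0}, Ty: {1}, Tz: {2}".format(
                round(translation[0], 3), round(translation[1], 3), round(translation[2], 3)))
            viewer.update_zed_position(camera_pose)  # hypothetical viewer hook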
Example #2
def initZed(fps):
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()
    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080  # Use HD1080 video mode
    init_params.camera_fps = fps  # Set the camera FPS

    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a PyTransform object for the PyTrackingParameters object
    py_transform = core.PyTransform()
    tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    image = core.PyMat()
    zed_pose = zcam.PyPose()
    return zed, image, zed_pose
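A possible way to call initZed(), reusing the grab/get_position pattern from the other examples in this collection. The 30 FPS value is only an illustration, and retrieve_image() with PyVIEW_LEFT is assumed from the same old pyzed API; neither appears in the snippet above.

if __name__ == "__main__":
    zed, image, zed_pose = initZed(30)
    runtime_parameters = zcam.PyRuntimeParameters()
    if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
        # Retrieve the left image and the camera pose relative to the world frame
        zed.retrieve_image(image, sl.PyVIEW.PyVIEW_LEFT)
        zed.get_position(zed_pose, sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
        py_translation = core.PyTranslation()
        print(zed_pose.get_translation(py_translation).get())
    zed.close()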
Example #3
    def __init__(self, inputQueue, outputQueue, visualize):
        self.inputQueue = inputQueue
        self.outputQueue = outputQueue
        #self.outputQueueImages = outputQueueImages
        self.visualize = visualize
        self.xlist = []
        self.ylist = []
        if self.visualize:
            plt.figure(1)
        init = zcam.PyInitParameters(
            camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_VGA,
            depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
            coordinate_units=sl.PyUNIT.PyUNIT_METER,
            coordinate_system=sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
            sdk_verbose=False)
        cam = zcam.PyZEDCamera()
        self.image = core.PyMat()
        status = cam.open(init)
        if status != tp.PyERROR_CODE.PySUCCESS:
            print(repr(status))
            exit()

        transform = core.PyTransform()
        tracking_params = zcam.PyTrackingParameters(transform)
        cam.enable_tracking(tracking_params)

        runtime = zcam.PyRuntimeParameters()
        camera_pose = zcam.PyPose()

        py_translation = core.PyTranslation()
        print("Starting ZEDPositioning")
        self.start_zed(cam, runtime, camera_pose, py_translation)
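    # self.start_zed() is not part of this snippet. A minimal sketch, assuming it
    # publishes the tracked translation on the output queue and optionally plots
    # the X/Z path with matplotlib (matching the xlist/ylist fields above):
    def start_zed(self, cam, runtime, camera_pose, py_translation):
        # Grab frames, read the world-frame pose, and publish the translation
        while True:
            if cam.grab(runtime) == tp.PyERROR_CODE.PySUCCESS:
                cam.get_position(camera_pose, sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)
                translation = camera_pose.get_translation(py_translation).get()
                self.outputQueue.put(translation)
                if self.visualize:
                    # Plot the X/Z ground path as it is tracked
                    self.xlist.append(translation[0])
                    self.ylist.append(translation[2])
                    plt.figure(1)
                    plt.clf()
                    plt.plot(self.xlist, self.ylist)
                    plt.pause(0.001)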
Example #4
def main():
    # Create a PyZEDCamera object
    zed = zcam.PyZEDCamera()

    # Create a PyInitParameters object and set configuration parameters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720  # Use HD720 video mode (default fps: 60)
    # Use a right-handed Y-up coordinate system
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER  # Set units in meters

    # Open the camera
    err = zed.open(init_params)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Enable positional tracking with default parameters
    # First create a PyTransform object for the PyTrackingParameters object
    py_transform = core.PyTransform()
    tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
    err = zed.enable_tracking(tracking_parameters)
    if err != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Track the camera position during 1000 frames
    i = 0
    zed_pose = zcam.PyPose()
    runtime_parameters = zcam.PyRuntimeParameters()

    while i < 1000:
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Get the pose of the left eye of the camera with reference to the world frame
            zed.get_position(zed_pose,
                             sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)

            # Display the translation and timestamp
            py_translation = core.PyTranslation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp))

            # Display the orientation quaternion
            py_orientation = core.PyOrientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            i = i + 1

    # Close the camera
    zed.close()
Example #5
    def enable_tracking(self, init_vector=(0.0, 0.0, 0.0), load_area=False):
        params = default_tracking_params()
        if load_area is not False:
            params.area_file_path = load_area
        # Set the initial position equal to the coordinates in init_vector
        translation = core.PyTranslation()
        translation.init_vector(init_vector[0], init_vector[1], init_vector[2])  # X, Y, Z
        transform = core.PyTransform()
        transform.set_translation(translation)
        params.set_initial_world_transform(transform)
        # Enable tracking and update overall_status at the same time
        self.overall_status = self.camera.enable_tracking(params)
        # TODO TODO TODO self.transformer = rigid_pose_transform((-init_vector[0], -init_vector[1], -init_vector[2]))
        # saves function that transforms poses such that they ignore the init_vector
        self.pose = zcam.PyPose()
        self.tracking_status = 'Enabled'
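default_tracking_params() is not shown in this example; a minimal sketch, assuming it simply wraps PyTrackingParameters with an identity initial transform as the other examples here do:

def default_tracking_params():
    # Tracking parameters with an identity initial world transform
    identity = core.PyTransform()
    return zcam.PyTrackingParameters(init_pos=identity)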
Example #6
if status != tp.PyERROR_CODE.PySUCCESS:
    print(repr(status))
    streamRunning = False
    exit(1)

#print("Sleeping for 5 seconds...")
#time.sleep(5)

mat = core.PyMat()

py_transform = core.PyTransform()
tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
err = cam.enable_tracking(tracking_parameters)
if err != tp.PyERROR_CODE.PySUCCESS:
    print("Positional Tracking Failed")

zed_pose = zcam.PyPose()
zed_imu = zcam.PyIMUData()
py_mesh = mesh.PyMesh()  # Create a PyMesh object

cam.set_camera_settings(sl.PyCAMERA_SETTINGS.PyCAMERA_SETTINGS_EXPOSURE, 60)
current_value = cam.get_camera_settings(
    sl.PyCAMERA_SETTINGS.PyCAMERA_SETTINGS_EXPOSURE)
print("Exposure: " + str(current_value))

print_camera_information(cam)

now = datetime.datetime.now()
print("Vision Log for " + str(now.day) + "/" + str(now.month) + "/" +
      str(now.year) + " ~ " + str(now.hour) + ":" + str(now.minute) + ":" +
      str(now.second))
print("OpenCV version: " + str(cv2.__version__))