Code example #1
0
def main():
    """Open a ZED camera, spatially map 500 frames, then save the mesh.

    Produces "mesh.obj" in the working directory; exits the process if
    the camera, tracking, or mapping fails to initialize.
    """
    # Camera handle
    zed = zcam.PyZEDCamera()

    # HD720 (default 60 fps), right-handed Y-up world frame, meters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER

    # Bail out if the camera cannot be opened
    if zed.open(init_params) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Positional tracking must be active before spatial mapping can start
    tracking_parameters = zcam.PyTrackingParameters(init_pos=core.PyTransform())
    if zed.enable_tracking(tracking_parameters) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Start spatial mapping with default parameters
    mapping_parameters = zcam.PySpatialMappingParameters()
    if zed.enable_spatial_mapping(mapping_parameters) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Integrate 500 successfully grabbed frames into the map
    py_mesh = mesh.PyMesh()
    captured = 0
    while captured < 500:
        if zed.grab(zcam.PyRuntimeParameters()) == tp.PyERROR_CODE.PySUCCESS:
            # Mapping consumes the newly retrieved image/depth/pose in the
            # background; we only poll its state for progress display.
            mapping_state = zed.get_spatial_mapping_state()

            print("\rImages captured: {0} / 500 || {1}".format(
                captured, mapping_state))

            captured = captured + 1

    print("\n")

    # Extract, filter, and persist the mesh
    print("Extracting Mesh...\n")
    zed.extract_whole_mesh(py_mesh)
    print("Filtering Mesh...\n")
    # Filtering removes unnecessary vertices and faces
    py_mesh.filter(mesh.PyMeshFilterParameters())
    print("Saving Mesh...\n")
    py_mesh.save("mesh.obj")

    # Tear down in reverse order of activation
    zed.disable_spatial_mapping()
    zed.disable_tracking()
    zed.close()
def main():
    """Open the ZED, enable tracking, and drive the GL tracking viewer."""
    init = zcam.PyInitParameters(
        camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_HD720,
        depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
        coordinate_units=sl.PyUNIT.PyUNIT_METER,
        coordinate_system=sl.PyCOORDINATE_SYSTEM.
        PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
        sdk_verbose=True)

    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        # Report the SDK error and stop
        print(repr(status))
        exit()

    # Tracking starts from the identity transform
    cam.enable_tracking(zcam.PyTrackingParameters(core.PyTransform()))

    runtime = zcam.PyRuntimeParameters()
    camera_pose = zcam.PyPose()
    py_translation = core.PyTranslation()

    viewer = tv.PyTrackingViewer()
    viewer.init()

    # Runs the grab/track loop until it returns
    start_zed(cam, runtime, camera_pose, viewer, py_translation)

    viewer.exit()
    glutMainLoop()
def run(cam, runtime, camera_pose, viewer, py_translation):
    """Grab frames forever, feeding pose updates and status text to the viewer."""
    while True:
        if cam.grab(runtime) != tp.PyERROR_CODE.PySUCCESS:
            # No frame available yet; yield briefly and retry
            tp.c_sleep_ms(1)
            continue

        tracking_state = cam.get_position(camera_pose)
        text_translation = ""
        text_rotation = ""
        if tracking_state == sl.PyTRACKING_STATE.PyTRACKING_STATE_OK:
            # Rotation vector, rounded for display
            rot = camera_pose.get_rotation_vector()
            rx = round(rot[0], 2)
            ry = round(rot[1], 2)
            rz = round(rot[2], 2)

            # Translation, rounded for display
            trans = camera_pose.get_translation(py_translation)
            tx = round(trans.get()[0], 2)
            ty = round(trans.get()[1], 2)
            tz = round(trans.get()[2], 2)

            text_translation = str((tx, ty, tz))
            text_rotation = str((rx, ry, rz))
            viewer.update_zed_position(camera_pose.pose_data(core.PyTransform()))

        # Update the overlay even when tracking is not OK (empty strings)
        viewer.update_text(text_translation, text_rotation, tracking_state)
Code example #4
0
	def run2D(self, cam, runtime, camera_pose, py_translation):
		"""Track the camera in 2D, plotting (tx, ty) live and logging samples.

		Each successfully grabbed frame with a valid tracking fix appends
		[timestamp, rx, ry, tx, ty] to self.data and scatter-plots the
		position.

		Fixes two defects in the original: the plotting lines mixed spaces
		into the file's tab indentation (a TabError at parse time), and the
		tracking-state check sat outside the grab-success branch, so
		`tracking_state` could be read before it was ever assigned when the
		first grab failed.
		"""
		pts = 0
		while True:
			if cam.grab(runtime) != tp.PyERROR_CODE.PySUCCESS:
				continue  # no new frame; try again

			tracking_state = cam.get_position(camera_pose)
			text_translation = ""
			text_rotation = ""

			if tracking_state != sl.PyTRACKING_STATE.PyTRACKING_STATE_OK:
				continue  # no valid fix for this frame

			rotation = camera_pose.get_rotation_vector()
			rx = round(rotation[0], 2)
			ry = round(rotation[1], 2)

			translation = camera_pose.get_translation(py_translation)
			tx = round(translation.get()[0], 5)
			ty = round(translation.get()[1], 5)

			plt.scatter(tx, ty)
			plt.draw()
			# store data point
			data_point = [time.time(), rx, ry, tx, ty]
			self.data.append(data_point)

			text_translation = str((tx, ty))
			text_rotation = str((rx, ry))
			pose_data = camera_pose.pose_data(core.PyTransform())
Code example #5
0
    def __init__(self, inputQueue, outputQueue, visualize):
        """Wire up the I/O queues, open the ZED in VGA mode, enable tracking,
        and hand control to the positioning loop."""
        self.inputQueue = inputQueue
        self.outputQueue = outputQueue
        #self.outputQueueImages = outputQueueImages
        self.visualize = visualize
        self.xlist = []
        self.ylist = []
        if self.visualize:
            plt.figure(1)

        # VGA + performance depth keeps per-frame latency low for positioning
        init = zcam.PyInitParameters(
            camera_resolution=sl.PyRESOLUTION.PyRESOLUTION_VGA,
            depth_mode=sl.PyDEPTH_MODE.PyDEPTH_MODE_PERFORMANCE,
            coordinate_units=sl.PyUNIT.PyUNIT_METER,
            coordinate_system=sl.PyCOORDINATE_SYSTEM.
            PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP,
            sdk_verbose=False)

        cam = zcam.PyZEDCamera()
        self.image = core.PyMat()
        status = cam.open(init)
        if status != tp.PyERROR_CODE.PySUCCESS:
            # Report the SDK error and stop the process
            print(repr(status))
            exit()

        # Tracking starts from the identity transform
        cam.enable_tracking(zcam.PyTrackingParameters(core.PyTransform()))

        runtime = zcam.PyRuntimeParameters()
        camera_pose = zcam.PyPose()
        py_translation = core.PyTranslation()

        print("Starting ZEDPositioning")
        # Blocks here: start_zed runs the grab/track loop
        self.start_zed(cam, runtime, camera_pose, py_translation)
Code example #6
0
def initZed(fps):
    """Open a ZED in HD1080 at `fps` frames/sec with positional tracking on.

    Returns (camera, image buffer, pose object).  Exits the process if the
    camera cannot be opened or tracking cannot be enabled.
    """
    zed = zcam.PyZEDCamera()

    # HD1080 at the caller-supplied frame rate
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD1080
    init_params.camera_fps = fps

    # Right-handed Y-up world frame, measured in meters
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER

    if zed.open(init_params) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Tracking starts at the identity pose (PyTransform default)
    tracking_parameters = zcam.PyTrackingParameters(init_pos=core.PyTransform())
    if zed.enable_tracking(tracking_parameters) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    return zed, core.PyMat(), zcam.PyPose()
def main():
    """Open the ZED, enable tracking, and print pose data for 1000 frames."""
    zed = zcam.PyZEDCamera()

    # HD720 (default 60 fps), right-handed Y-up world frame, meters
    init_params = zcam.PyInitParameters()
    init_params.camera_resolution = sl.PyRESOLUTION.PyRESOLUTION_HD720
    init_params.coordinate_system = sl.PyCOORDINATE_SYSTEM.PyCOORDINATE_SYSTEM_RIGHT_HANDED_Y_UP
    init_params.coordinate_units = sl.PyUNIT.PyUNIT_METER

    if zed.open(init_params) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    # Tracking starts from the identity transform
    tracking_parameters = zcam.PyTrackingParameters(init_pos=core.PyTransform())
    if zed.enable_tracking(tracking_parameters) != tp.PyERROR_CODE.PySUCCESS:
        exit(1)

    zed_pose = zcam.PyPose()
    runtime_parameters = zcam.PyRuntimeParameters()

    # Report the camera position over 1000 successfully grabbed frames
    frames = 0
    while frames < 1000:
        if zed.grab(runtime_parameters) == tp.PyERROR_CODE.PySUCCESS:
            # Pose of the camera's left eye expressed in the world frame
            zed.get_position(zed_pose,
                             sl.PyREFERENCE_FRAME.PyREFERENCE_FRAME_WORLD)

            # Translation and capture timestamp
            py_translation = core.PyTranslation()
            tx = round(zed_pose.get_translation(py_translation).get()[0], 3)
            ty = round(zed_pose.get_translation(py_translation).get()[1], 3)
            tz = round(zed_pose.get_translation(py_translation).get()[2], 3)
            print("Translation: Tx: {0}, Ty: {1}, Tz {2}, Timestamp: {3}\n".
                  format(tx, ty, tz, zed_pose.timestamp))

            # Orientation quaternion (x, y, z, w)
            py_orientation = core.PyOrientation()
            ox = round(zed_pose.get_orientation(py_orientation).get()[0], 3)
            oy = round(zed_pose.get_orientation(py_orientation).get()[1], 3)
            oz = round(zed_pose.get_orientation(py_orientation).get()[2], 3)
            ow = round(zed_pose.get_orientation(py_orientation).get()[3], 3)
            print("Orientation: Ox: {0}, Oy: {1}, Oz {2}, Ow: {3}\n".format(
                ox, oy, oz, ow))

            frames += 1

    zed.close()
Code example #8
0
    def run(self, cam, runtime, camera_pose, py_translation):
        """Main positioning loop: grab frames, read the pose, publish position.

        Publishes [x, y, z, orientation] to self.outputQueue (keeping at most
        one entry queued) and, when self.visualize is set, renders a live 2D
        scatter of the trajectory.  Runs forever; never returns.
        """
        while True:
            starttime = time()
            if cam.grab(runtime) == tp.PyERROR_CODE.PySUCCESS:
                cam.retrieve_image(self.image, sl.PyVIEW.PyVIEW_LEFT)
                # self.CVimage = self.image.get_data()
                # if (self.outputImageQueue.qsize() > 1):
                #     self.outputImageQueue.get()  # Remove last thing from queue
                # else:
                #     self.outputImageQueue.put(self.CVimage)
                tracking_state = cam.get_position(camera_pose)
                text_translation = ""
                text_rotation = ""
                if tracking_state == sl.PyTRACKING_STATE.PyTRACKING_STATE_OK:
                    # Rotation vector and translation, rounded for display
                    rotation = camera_pose.get_rotation_vector()
                    rx = round(rotation[0], 2)
                    ry = round(rotation[1], 2)
                    rz = round(rotation[2], 2)

                    translation = camera_pose.get_translation(py_translation)
                    tx = round(translation.get()[0], 2)
                    ty = round(translation.get()[1], 2)
                    tz = round(translation.get()[2], 2)

                    text_translation = str((tx, ty, tz))
                    text_rotation = str((rx, ry, rz))
                    pose_data = camera_pose.pose_data(core.PyTransform())
                    # Column 3 of the pose matrix holds the translation.
                    # Y is negated and the SDK's Y/Z axes are swapped here —
                    # presumably mapping the right-handed Y-up camera frame
                    # onto a planar robot X/Y frame; TODO confirm with caller.
                    self.x = float(pose_data[0][3])
                    self.y = -float(pose_data[2][3])
                    self.z = float(pose_data[1][3])
                    # Heading taken from the Y rotation component, offset by pi/2
                    self.orientation = float(ry) + 3.14159 / 2
                    self.xlist.append(self.x)
                    self.ylist.append(self.y)
                    self.position = [self.x, self.y, self.z, self.orientation]
                    # Keep only the freshest sample in the queue (size <= 1):
                    # drop the stale entry before publishing the new one.
                    if self.outputQueue.qsize() == 0:
                        self.outputQueue.put(self.position)
                    else:
                        self.outputQueue.get()
                        self.outputQueue.put(self.position)
                    endtime = time()
                    if (self.visualize):
                        # print("Time elapsed Zed Positioning: " + str(endtime-starttime))
                        # print("Approximate fps: " + str(1/(endtime-starttime)))
                        # print("X, Y, Theta: " + str(self.x) + ", " + str(self.y) + ", " + str(self.orientation*180/3.14159))
                        # Render by round-tripping through a PNG on disk and
                        # displaying it with OpenCV — slow, but avoids needing
                        # an interactive matplotlib backend.
                        ax2 = plt.subplot(111)
                        ax2.set_aspect("equal")
                        ax2.scatter(self.x, self.y, c='r', s=3)
                        plt.savefig("Test.png")
                        tempimg = cv2.imread("Test.png")
                        cv2.imshow("ZedPositioning", tempimg)
                        os.remove('Test.png')
                        cv2.waitKey(1)
            else:
                # No frame ready; yield briefly before retrying
                tp.c_sleep_ms(1)
Code example #9
0
 def enable_tracking(self, init_vector=(0.0, 0.0, 0.0), load_area=False):
     """Enable positional tracking, starting from `init_vector` (X, Y, Z).

     init_vector -- (x, y, z) starting position in world coordinates
     load_area   -- path of an area file to relocalize against, or False

     Fix: the original called self.camera.enable_tracking() *before*
     writing the initial world transform into the parameters, so the
     requested start position was silently ignored.  The parameters are
     now fully populated first, then handed to the camera.
     """
     params = default_tracking_params()
     if load_area is not False:
         params.area_file_path = load_area

     # Build the initial world transform from init_vector
     translation = core.PyTranslation()
     translation.init_vector(init_vector[0], init_vector[1],
                             init_vector[2])  # X, Y, Z
     transform = core.PyTransform()
     transform.set_translation(translation)
     params.set_initial_world_transform(transform)
     # sets position equal to coordinates in init_vector

     # Enables tracking and records the SDK status at the same time
     self.overall_status = self.camera.enable_tracking(params)

     # TODO TODO TODO self.transformer = rigid_pose_transform((-init_vector[0], -init_vector[1], -init_vector[2]))
     # saves function that transforms poses such that they ignore the init_vector
     self.pose = zcam.PyPose()
     self.tracking_status = 'Enabled'
Code example #10
0
def main():
    """Replay an SVO file, build a mesh from it, then filter, texture, and save."""
    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    print("Reading SVO file: {0}".format(filepath))

    cam = zcam.PyZEDCamera()
    status = cam.open(zcam.PyInitParameters(svo_input_filename=filepath))
    if status != tp.PyERROR_CODE.PySUCCESS:
        # Report the SDK error and stop
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    tracking = zcam.PyTrackingParameters(core.PyTransform())

    # Spatial mapping requires tracking to be running first
    cam.enable_tracking(tracking)
    cam.enable_spatial_mapping(zcam.PySpatialMappingParameters())

    pymesh = mesh.PyMesh()
    print("Processing...")
    # Feed 200 frames into the mapper; mesh updates are requested asynchronously
    for _frame in range(200):
        cam.grab(runtime)
        cam.request_mesh_async()

    cam.extract_whole_mesh(pymesh)
    cam.disable_tracking()
    cam.disable_spatial_mapping()

    # Aggressive decimation before texturing
    filter_params = mesh.PyMeshFilterParameters()
    filter_params.set(mesh.PyFILTER.PyFILTER_HIGH)
    print("Filtering params : {0}.".format(pymesh.filter(filter_params)))

    apply_texture = pymesh.apply_texture(
        mesh.PyMESH_TEXTURE_FORMAT.PyMESH_TEXTURE_RGBA)
    print("Applying texture : {0}.".format(apply_texture))
    print_mesh_information(pymesh, apply_texture)

    save_filter(filter_params)
    save_mesh(pymesh)
    cam.close()
    print("\nFINISH")
Code example #11
0
def default_tracking_params():
    """Return PyTrackingParameters at the identity pose with spatial memory on."""
    tracking_params = zcam.PyTrackingParameters(init_pos=core.PyTransform())
    tracking_params.enable_spatial_memory = True
    # NOTE: enable_pose_smoothing is listed in the documentation but does not
    # work in this binding, so it is deliberately left untouched here.
    return tracking_params
Code example #12
0
# NOTE(review): flat script fragment — `cam` and `init` are created earlier in
# the file (outside this excerpt), and `print_camera_information` is defined
# elsewhere; the objects built here are presumably used further down. Verify
# against the full file before renaming anything.
temp = 0
failed = 0
if not cam.is_opened():
    print("Opening ZED Camera...")
status = cam.open(init)
if status != tp.PyERROR_CODE.PySUCCESS:
    # Camera failed to open: report the SDK status and abort
    print(repr(status))
    streamRunning = False
    exit(1)

#print("Sleeping for 5 seconds...")
#time.sleep(5)

# Reusable image buffer for later retrieve calls
mat = core.PyMat()

# Enable positional tracking starting from the identity transform
py_transform = core.PyTransform()
tracking_parameters = zcam.PyTrackingParameters(init_pos=py_transform)
err = cam.enable_tracking(tracking_parameters)
if err != tp.PyERROR_CODE.PySUCCESS:
    # Non-fatal: the script continues without positional tracking
    print("Positional Tracking Failed")

zed_pose = zcam.PyPose()
zed_imu = zcam.PyIMUData()
py_mesh = mesh.PyMesh()  # Create a PyMesh object

# Pin exposure manually, then read it back to confirm the setting took effect
cam.set_camera_settings(sl.PyCAMERA_SETTINGS.PyCAMERA_SETTINGS_EXPOSURE, 60)
current_value = cam.get_camera_settings(
    sl.PyCAMERA_SETTINGS.PyCAMERA_SETTINGS_EXPOSURE)
print("Exposure: " + str(current_value))

print_camera_information(cam)
Code example #13
0
    def __init__(self):
        """Start with an identity transform and empty body/path buffers."""
        self.path = core.PyTransform()
        self.path_mem = []
        self.body_io = []

        # Seed the path tracker with the fresh transform and its memory list
        self.set_path(self.path, self.path_mem)