def ingest_in_objects_queue(self, batch):
        '''
        ingest_in_objects_queue:
            Converts a list of batched objects returned by the SDK's get_objects_batch()
            into a time-sorted sequence of sl.Objects.
            Use this function to fill a deque of sl.Objects that can then be consumed
            as a delayed stream of objects.

        Parameters:
            batch (list[sl.ObjectsBatch]): list of sl.ObjectsBatch obtained from get_objects_batch()
        '''
        # If the list is empty, do nothing
        if not batch:
            return

        # Add objects in dict with timestamp as key
        list_of_new_objects = {}
        for current_traj in batch:
            # This should never happen, but check anyway
            if len(current_traj.timestamps) != len(current_traj.positions):
                continue
            # For each sample, construct an ObjectData and put it in the corresponding sl.Objects
            for i in range(len(current_traj.timestamps)):
                ts = current_traj.timestamps[i]
                new_object_data = sl.ObjectData()
                new_object_data.id = current_traj.id
                new_object_data.tracking_state = current_traj.tracking_state
                new_object_data.position = current_traj.positions[i]
                new_object_data.label = current_traj.label
                new_object_data.sublabel = current_traj.sublabel
                new_object_data.bounding_box_2d = current_traj.bounding_boxes_2d[i]
                new_object_data.bounding_box = current_traj.bounding_boxes[i]

                # Check if a detected object with the current timestamp already exists in the map
                ts_ms = ts.get_milliseconds()
                if ts_ms in list_of_new_objects:
                    # Append new_object_data to the existing object_list
                    list_of_new_objects[ts_ms].object_list = (
                        list_of_new_objects[ts_ms].object_list + [new_object_data])
                else:
                    current_obj = sl.Objects()
                    current_obj.timestamp = ts.data_ns  # sl.Objects.timestamp can be set with time in nanoseconds
                    current_obj.is_new = True
                    current_obj.is_tracked = True
                    # Append new_object_data to the object_list
                    current_obj.object_list = current_obj.object_list + [new_object_data]
                    list_of_new_objects[ts_ms] = current_obj

        # Ingest into the queue of objects that will be emptied by the main loop.
        # Python dicts preserve insertion order, not key order, so iterate over
        # sorted timestamps to keep the queue chronological.
        for ts_ms in sorted(list_of_new_objects):
            self.objects_tracked_queue.append(list_of_new_objects[ts_ms])
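
A minimal sketch of a caller for this method (method name hypothetical), assuming self.zed is an opened sl.Camera with the batching system enabled through sl.BatchParameters, and that get_objects_batch() fills the provided list and returns an sl.ERROR_CODE:

    def grab_and_ingest(self, objects, obj_runtime_param):
        # Keep calling retrieve_objects() so the SDK can build the batched trajectories
        self.zed.retrieve_objects(objects, obj_runtime_param)
        trajectories = []
        # The list stays empty until the configured batch latency has elapsed
        if self.zed.get_objects_batch(trajectories) == sl.ERROR_CODE.SUCCESS:
            self.ingest_in_objects_queue(trajectories)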
Example 2
import pyzed.sl as sl
import cv2


def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.camera_fps = 15  # Set fps at 15

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Prepare an image container and the runtime parameters
    mat = sl.Mat()
    runtime_parameters = sl.RuntimeParameters()
    key = ''

    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking = False

    zed.enable_object_detection(obj_param)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40

    while key != 113:  # for 'q' key
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # A new image is available if grab() returns SUCCESS
            zed.retrieve_image(mat, sl.VIEW.LEFT)
            zed.retrieve_objects(objects, obj_runtime_param)
            obj_array = objects.object_list
            image_data = mat.get_data()
            for i in range(len(obj_array)):
                obj_data = obj_array[i]
                bounding_box = obj_data.bounding_box_2d
                cv2.rectangle(
                    image_data,
                    (int(bounding_box[0, 0]), int(bounding_box[0, 1])),
                    (int(bounding_box[2, 0]), int(bounding_box[2, 1])),
                    get_color_id_gr(int(obj_data.id)), 3)

            cv2.imshow("ZED", image_data)
        key = cv2.waitKey(5)

    cv2.destroyAllWindows()

    # Close the camera
    zed.close()
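
The get_color_id_gr() helper used above is not part of the ZED SDK; it comes from the sample's viewer utilities. A minimal stand-in (hypothetical) that maps a tracking id to a stable BGR color:

def get_color_id_gr(idx):
    # Fixed palette; negative ids (untracked detections) fall back to gray
    id_colors = [(232, 176, 59), (175, 208, 25), (102, 205, 105), (185, 0, 255), (99, 107, 252)]
    if idx < 0:
        return (136, 136, 136)
    return id_colors[idx % len(id_colors)]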
Example 3
    def pop(self, local_pose, world_pose, image, depth, objects):
        '''
        pop:
            Pops the oldest data from the FIFO system.

        Parameters:
            local_pose (sl.Pose): pose of the camera in the camera reference frame at the objects' timestamp
            world_pose (sl.Pose): pose of the camera in the world reference frame at the objects' timestamp
            image (sl.Mat): image data at the objects' timestamp
            depth (sl.Mat): depth data at the objects' timestamp
            objects (sl.Objects): objects detected in the past
        Returns:
            (local_pose, world_pose, image, depth, objects). The sl.Pose and sl.Objects
            arguments are rebound inside the function, so callers should use the returned values.
        '''
        objects = sl.Objects()
        local_pose = sl.Pose()
        world_pose = sl.Pose()

        if self.objects_tracked_queue:
            tracked_merged_obj = self.objects_tracked_queue[0]
            if (self.init_queue_ts.data_ns == 0):
                self.init_queue_ts = tracked_merged_obj.timestamp

            targetTS_ms = tracked_merged_obj.timestamp.get_milliseconds()

            local_pose = self.find_closest_local_pose_from_ts(targetTS_ms)
            world_pose = self.find_closest_world_pose_from_ts(targetTS_ms)

            if WITH_IMAGE_RETENTION:
                tmp_image = self.find_closest_image_from_ts(targetTS_ms)
                tmp_image.copy_to(image)
                tmp_image.free(sl.MEM.CPU)
                self.image_map_ms[targetTS_ms].free(sl.MEM.CPU)
                del self.image_map_ms[targetTS_ms]

                tmp_depth = self.find_closest_depth_from_ts(targetTS_ms)
                tmp_depth.copy_to(depth)
                tmp_depth.free(sl.MEM.CPU)
                self.depth_map_ms[targetTS_ms].free(sl.MEM.CPU)
                del self.depth_map_ms[targetTS_ms]

            objects = tracked_merged_obj
            self.objects_tracked_queue.popleft()

        return local_pose, world_pose, image, depth, objects
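
The find_closest_*_from_ts() helpers are not shown in this snippet. A minimal sketch of one of them, assuming the poses are stored in a dict keyed by timestamp in milliseconds (attribute name hypothetical):

    def find_closest_local_pose_from_ts(self, target_ts_ms):
        # Return the stored pose whose timestamp is nearest to target_ts_ms
        pose = sl.Pose()
        if self.local_pose_map_ms:
            closest_ts = min(self.local_pose_map_ms, key=lambda ts: abs(ts - target_ts_ms))
            pose = self.local_pose_map_ms[closest_ts]
        return pose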
Example 4
    def __init__(self):
        # Initialize the camera parameters
        self.zed2 = sl.Camera()
        self.init_params = sl.InitParameters()
        self.init_params.camera_resolution = sl.RESOLUTION.VGA
        self.init_params.camera_fps = 15  # default 15
        self.init_params.coordinate_units = sl.UNIT.METER
        self.init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
        _ = self.zed2.open(self.init_params)

        self.mat = sl.Mat()
        self.objects = sl.Objects()
        self.point_cloud = sl.Mat()

        self.runtime_params = sl.RuntimeParameters()
        self.runtime_params.sensing_mode = sl.SENSING_MODE.STANDARD
        self.obj_param = sl.ObjectDetectionParameters()
        self.obj_param.enable_tracking = False
        self.zed2.enable_object_detection(self.obj_param)
        self.obj_runtime_params = sl.ObjectDetectionRuntimeParameters()
        self.obj_runtime_params.detection_confidence_threshold = 40
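
A minimal sketch (method name hypothetical) of how these members could be used in a grab loop:

    def detect_once(self):
        # Grab one frame and return the current detections (empty list on failure)
        if self.zed2.grab(self.runtime_params) == sl.ERROR_CODE.SUCCESS:
            self.zed2.retrieve_image(self.mat, sl.VIEW.LEFT)
            self.zed2.retrieve_measure(self.point_cloud, sl.MEASURE.XYZRGBA)
            self.zed2.retrieve_objects(self.objects, self.obj_runtime_params)
            return self.objects.object_list
        return []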
Example 5
    if obj_param.enable_tracking:
        zed.enable_positional_tracking()

    zed.enable_object_detection(obj_param)

    camera_info = zed.get_camera_information()
    # Create OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(camera_info.calibration_parameters.left_cam)

    # Configure object detection runtime parameters
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 50
    obj_runtime_param.object_class_filter = [sl.OBJECT_CLASS.PERSON]    # Only detect Persons

    # Create ZED objects filled in the main loop
    objects = sl.Objects()
    image = sl.Mat()

    while viewer.is_available():
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT)
            # Retrieve objects
            zed.retrieve_objects(objects, obj_runtime_param)
            # Update GL view
            viewer.update_view(image, objects)

    viewer.exit()

    image.free(memory_type=sl.MEM.CPU)
Example 6
import pyzed.sl as sl


def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    obj_param = sl.ObjectDetectionParameters()
    # Different models can be chosen, trading runtime for accuracy
    obj_param.detection_model = sl.DETECTION_MODEL.HUMAN_BODY_FAST
    obj_param.enable_tracking = True
    obj_param.image_sync = True
    obj_param.enable_mask_output = False
    # Optimize the person joints position, requires more computations
    obj_param.enable_body_fitting = True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking:
        positional_tracking_param = sl.PositionalTrackingParameters()
        # positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    # For outdoor scenes or long range, lower the confidence to avoid missing detections (~20-30)
    # For indoor scenes or close range, a higher confidence limits false positives and increases precision (~50+)
    obj_runtime_param.detection_confidence_threshold = 40

    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        if objects.is_new:
            obj_array = objects.object_list
            print(str(len(obj_array)) + " Person(s) detected\n")
            if len(obj_array) > 0:
                first_object = obj_array[0]
                print("First Person attributes:")
                print(" Confidence (" + str(int(first_object.confidence)) + "/100)")
                if obj_param.enable_tracking:
                    print(" Tracking ID: " + str(int(first_object.id)) + " tracking state: " + repr(
                        first_object.tracking_state) + " / " + repr(first_object.action_state))
                position = first_object.position
                velocity = first_object.velocity
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n Velocity: [{3},{4},{5}]\n 3D dimentions: [{6},{7},{8}]".format(
                    position[0], position[1], position[2], velocity[0], velocity[1], velocity[2], dimensions[0],
                    dimensions[1], dimensions[2]))
                if first_object.mask.is_init():
                    print(" 2D mask available")

                print(" Keypoint 2D ")
                keypoint_2d = first_object.keypoint_2d
                for it in keypoint_2d:
                    print("    " + str(it))
                print("\n Keypoint 3D ")
                keypoint = first_object.keypoint
                for it in keypoint:
                    print("    " + str(it))

                input('\nPress enter to continue: ')

    # Close the camera
    zed.close()
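
A small follow-up sketch (assumption: joints the SDK could not estimate are reported as NaN), counting how many of the first person's 3D joints are valid:

import numpy as np

kp3d = np.array(first_object.keypoint)  # (N, 3) array of 3D joint positions
valid = kp3d[~np.isnan(kp3d).any(axis=1)]
print("{} of {} joints have a valid 3D position".format(len(valid), len(kp3d)))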
Example 7
import pyzed.sl as sl
import cv2


def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    # init_params.camera_fps = 15  # Set fps at 15

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    # Prepare an image container and the runtime parameters
    mat = sl.Mat()
    runtime_parameters = sl.RuntimeParameters()
    key = ''

    zed.enable_positional_tracking()

    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking = True
    obj_param.detection_model = sl.DETECTION_MODEL.HUMAN_BODY_FAST

    zed.enable_object_detection(obj_param)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 50

    while key != 113:  # for 'q' key
        # Grab an image, a RuntimeParameters object must be given to grab()
        if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
            # A new image is available if grab() returns SUCCESS
            zed.retrieve_image(mat, sl.VIEW.LEFT)
            zed.retrieve_objects(objects, obj_runtime_param)
            obj_array = objects.object_list
            image_data = mat.get_data()
            for i in range(len(obj_array)):
                obj_data = obj_array[i]
                bounding_box = obj_data.bounding_box_2d
                cv2.rectangle(
                    image_data,
                    (int(bounding_box[0, 0]), int(bounding_box[0, 1])),
                    (int(bounding_box[2, 0]), int(bounding_box[2, 1])),
                    get_color_id_gr(int(obj_data.id)), 3)

                keypoint = obj_data.keypoint_2d
                for kp in keypoint:
                    if kp[0] > 0 and kp[1] > 0:
                        cv2.circle(image_data, (int(kp[0]), int(kp[1])), 3,
                                   get_color_id_gr(int(obj_data.id)), -1)

                for bone in sl.BODY_BONES:
                    kp1 = keypoint[bone[0].value]
                    kp2 = keypoint[bone[1].value]
                    if kp1[0] > 0 and kp1[1] > 0 and kp2[0] > 0 and kp2[1] > 0:
                        cv2.line(image_data, (int(kp1[0]), int(kp1[1])),
                                 (int(kp2[0]), int(kp2[1])),
                                 get_color_id_gr(int(obj_data.id)), 2)

            cv2.imshow("ZED", image_data)
        key = cv2.waitKey(5)

    cv2.destroyAllWindows()

    # Close the camera
    zed.close()
Example 8
import pyzed.sl as sl


def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)

    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking = True
    obj_param.image_sync = True
    obj_param.enable_mask_output = True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking:
        positional_tracking_param = sl.PositionalTrackingParameters()
        # positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40

    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        if objects.is_new:
            obj_array = objects.object_list
            print(str(len(obj_array)) + " Object(s) detected\n")
            if len(obj_array) > 0:
                first_object = obj_array[0]
                print("First object attributes:")
                print(" Label '" + repr(first_object.label) + "' (conf. " + str(int(first_object.confidence)) + "/100)")
                if obj_param.enable_tracking:
                    print(" Tracking ID: " + str(int(first_object.id)) + " tracking state: " + repr(first_object.tracking_state) + " / " + repr(first_object.action_state))
                position = first_object.position
                velocity = first_object.velocity
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n Velocity: [{3},{4},{5}]\n 3D dimensions: [{6},{7},{8}]".format(position[0], position[1], position[2], velocity[0], velocity[1], velocity[2], dimensions[0], dimensions[1], dimensions[2]))
                if first_object.mask.is_init():
                    print(" 2D mask available")

                print(" Bounding Box 2D ")
                bounding_box_2d = first_object.bounding_box_2d
                for it in bounding_box_2d:
                    print("    " + str(it), end='')
                print("\n Bounding Box 3D ")
                bounding_box = first_object.bounding_box
                for it in bounding_box:
                    print("    " + str(it), end='')

                input('\nPress enter to continue: ')


    # Close the camera
    zed.close()
Example 9
    # 2D viewer utilities
    display_resolution = sl.Resolution(
        min(camera_info.camera_resolution.width, 1280),
        min(camera_info.camera_resolution.height, 720))
    image_scale = [
        display_resolution.width / camera_info.camera_resolution.width,
        display_resolution.height / camera_info.camera_resolution.height
    ]

    # Create OpenGL viewer
    viewer = gl.GLViewer()
    viewer.init(camera_info.calibration_parameters.left_cam,
                obj_param.enable_tracking)

    # Create ZED objects filled in the main loop
    bodies = sl.Objects()
    image = sl.Mat()

    while viewer.is_available():
        # Grab an image
        if zed.grab() == sl.ERROR_CODE.SUCCESS:
            # Retrieve left image
            zed.retrieve_image(image, sl.VIEW.LEFT, sl.MEM.CPU,
                               display_resolution)
            # Retrieve objects
            zed.retrieve_objects(bodies, obj_runtime_param)

            # Update GL view
            viewer.update_view(image, bodies)
            # Update OCV view
            image_left_ocv = image.get_data()
Example 10
import math
import timeit

import numpy as np
import pyzed.sl as sl


def main():
    # Create a Camera object
    zed = sl.Camera()

    # Create an InitParameters object and set configuration parameters
    init_params = sl.InitParameters()
    init_params.camera_resolution = sl.RESOLUTION.HD720  # Use HD720 video mode
    init_params.depth_mode = sl.DEPTH_MODE.PERFORMANCE
    init_params.coordinate_units = sl.UNIT.METER
    init_params.sdk_verbose = True

    # Open the camera
    err = zed.open(init_params)
    if err != sl.ERROR_CODE.SUCCESS:
        exit(1)
        
    obj_param = sl.ObjectDetectionParameters()
    obj_param.enable_tracking = True
    obj_param.image_sync = True
    obj_param.enable_mask_output = True

    camera_infos = zed.get_camera_information()
    if obj_param.enable_tracking:
        positional_tracking_param = sl.PositionalTrackingParameters()
        # positional_tracking_param.set_as_static = True
        positional_tracking_param.set_floor_as_origin = True
        zed.enable_positional_tracking(positional_tracking_param)

    print("Object Detection: Loading Module...")

    err = zed.enable_object_detection(obj_param)
    if err != sl.ERROR_CODE.SUCCESS:
        print(repr(err))
        zed.close()
        exit(1)

    objects = sl.Objects()
    obj_runtime_param = sl.ObjectDetectionRuntimeParameters()
    obj_runtime_param.detection_confidence_threshold = 40
    
    while zed.grab() == sl.ERROR_CODE.SUCCESS:
        err = zed.retrieve_objects(objects, obj_runtime_param)
        start = timeit.default_timer()
        if objects.is_new:
            obj_array = objects.object_list
            if len(obj_array) > 0:
                first_object = obj_array[0]
                print("First object attributes:")
                print(" Label '" + repr(first_object.label) + "' (conf. " + str(int(first_object.confidence)) + "/100)")
                position = first_object.position
                dimensions = first_object.dimensions
                print(" 3D position: [{0},{1},{2}]\n 3D dimensions: [{3},{4},{5}]".format(position[0], position[1], position[2], dimensions[0], dimensions[1], dimensions[2]))

                ######################
                image = sl.Mat()
                depth = sl.Mat()
                point_cloud = sl.Mat()
                mirror_ref = sl.Transform()
                mirror_ref.set_translation(sl.Translation(2.75, 4.0, 0))
                tr_np = mirror_ref.m
                zed.retrieve_image(image, sl.VIEW.LEFT)
                # Retrieve depth map. Depth is aligned on the left image
                zed.retrieve_measure(depth, sl.MEASURE.DEPTH)
                # Retrieve colored point cloud. Point cloud is aligned on the left image.
                zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA)
                x = round(image.get_width() / 2)
                y = round(image.get_height() / 2)
                err, point_cloud_value = point_cloud.get_value(x, y)

                distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
                                     point_cloud_value[1] * point_cloud_value[1] +
                                     point_cloud_value[2] * point_cloud_value[2])

                point_cloud_np = point_cloud.get_data()
                # Note: the result of this dot product is discarded; assign it if the
                # mirrored point cloud is actually needed
                point_cloud_np.dot(tr_np)
                if not np.isnan(distance) and not np.isinf(distance):
                    print("Distance to Camera at ({}, {}) (image center): {:1.3} m".format(x, y, distance), end="\r")
                
                print("\n Bounding Box 3D ")
                bounding_box = first_object.bounding_box
                stop = timeit.default_timer()
                print("\n FPS:", stop - start)


    # Close the camera
    zed.close()
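
The distance above is measured at the image center; a hypothetical variant that queries the point cloud at the first object's 2D bounding-box center instead (reusing point_cloud and first_object from the loop body):

bb = first_object.bounding_box_2d  # four 2D corners, clockwise from top-left
cx = int((bb[0, 0] + bb[2, 0]) / 2)
cy = int((bb[0, 1] + bb[2, 1]) / 2)
err, pc_val = point_cloud.get_value(cx, cy)
obj_distance = math.sqrt(pc_val[0] ** 2 + pc_val[1] ** 2 + pc_val[2] ** 2)
if not np.isnan(obj_distance) and not np.isinf(obj_distance):
    print("Distance to first object: {:.3f} m".format(obj_distance))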