Example #1
 def __init__(self, toggle_new_process: bool = False):
     """
     :param toggle_new_process: Open a new process to stream data
     """
     self._toggle_new_process = toggle_new_process
     if toggle_new_process:
         self._req_q = mp.Queue()  # queue to require data
         self._res_q = mp.Queue()  # queue to receive data
         self._pipeline = _DataPipeline(
             req_q=self._req_q,
             res_q=self._res_q,
         )
         self._pipeline.start()
     else:
         self._pipeline = rs.pipeline()
         self._config = rs.config()
         # Setup config
         self._config.enable_stream(rs.stream.depth, DEPTH_RESOLUTION[0],
                                    DEPTH_RESOLUTION[1], rs.format.z16,
                                    DEPTH_FPS)
         self._config.enable_stream(rs.stream.color, COLOR_RESOLUTION[0],
                                    COLOR_RESOLUTION[1], rs.format.bgr8,
                                    COLOR_FPS)
         # Start streaming with chosen configuration
         self._pipeline.start(self._config)
         # Declare pointcloud object, for calculating pointclouds and texture mappings
         self._pc = rs.pointcloud()
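The `_DataPipeline` worker used above is not part of this listing. A minimal sketch of what it could look like, assuming it owns the RealSense pipeline inside the child process and answers each request on `req_q` with a frame on `res_q` (the queue protocol and payload are assumptions):

import multiprocessing as mp

import numpy as np
import pyrealsense2 as rs


class _DataPipeline(mp.Process):
    """Hypothetical worker that streams RealSense data in its own process."""

    def __init__(self, req_q: mp.Queue, res_q: mp.Queue):
        super().__init__(daemon=True)
        self._req_q = req_q
        self._res_q = res_q

    def run(self):
        pipeline = rs.pipeline()
        pipeline.start()  # default recommended configuration
        try:
            while True:
                request = self._req_q.get()  # block until the parent asks for data
                if request is None:          # None acts as a stop sentinel
                    break
                frames = pipeline.wait_for_frames()
                depth = np.asanyarray(frames.get_depth_frame().get_data())
                self._res_q.put(depth)       # ndarrays pickle cleanly across processes
        finally:
            pipeline.stop()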
Example #2
    def get_frame(self):
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)

        aligned_depth_frame = aligned_frames.get_depth_frame()
        aligned_color_frame = aligned_frames.get_color_frame()
        if self._cfg["name"] == 'L515':
            aligned_ir_frame = aligned_frames.get_infrared_frame()
        else:
            aligned_irL_frame = aligned_frames.get_infrared_frame(1)
            aligned_irR_frame = aligned_frames.get_infrared_frame(2)

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        depth_image = depth_image * self.scale
        color_image = np.asanyarray(aligned_color_frame.get_data())
        if self._cfg["name"] == 'L515':
            infrared = np.asanyarray(aligned_ir_frame.get_data())
        else:
            infrared_L = np.asanyarray(aligned_irL_frame.get_data())
            infrared_R = np.asanyarray(aligned_irR_frame.get_data())

        pc = rs.pointcloud()
        point_cloud = pc.calculate(aligned_depth_frame)
        # point_cloud = None
        if self._cfg["name"] == 'L515':
            return Frame([color_image], [depth_image], [point_cloud],
                         [infrared])
        else:
            return Frame([color_image], [depth_image], [point_cloud],
                         [infrared_L, infrared_R])
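`Frame` is returned above but never defined in this listing. A plausible minimal container matching the four list arguments it receives (the field names are assumptions, not the project's actual definition):

from dataclasses import dataclass
from typing import Any, List


@dataclass
class Frame:
    color: List[Any]        # one BGR image per capture
    depth: List[Any]        # depth images, already scaled by the depth scale
    point_cloud: List[Any]  # rs.points objects
    infrared: List[Any]     # a single IR image (L515) or a left/right pair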
Example #3
    def get_pointcloud(self):
        pc = rs.pointcloud()
        with self.lock:  # exception-safe replacement for acquire()/release()
            if self.depth_frame is not None:
                # (Alternative: deproject the depth image manually with the
                # stored intrinsics; rs.pointcloud performs the same
                # computation natively.)
                pc.map_to(self.rgb_frame)
                points = pc.calculate(self.depth_frame)
            else:
                points = None
        return points
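To consume the returned `rs.points` as coordinates, the usual follow-up is to reinterpret the structured vertices as an (N, 3) float array, an idiom several of the examples below also use:

import numpy as np

def points_to_xyz(points):
    # rs.points vertices are structured float32 triplets; reinterpret as (N, 3)
    return np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)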
Example #4
def capture(pipelines, serial_numbers, prefix, postfix):

    pc = rs.pointcloud()
    points = rs.points()
    for camNo, pipe in enumerate(pipelines):
        frames = pipe.wait_for_frames()

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)
        points.export_to_ply(
            prefix + str(serial_numbers[camNo]) + '_' + postfix + '.ply',
            color_frame)

        print('Pointcloud saved for camera with serial number',
              serial_numbers[camNo])

        color_image = np.asanyarray(color_frame.get_data())
        imsave(
            prefix + str(serial_numbers[camNo]) + '_' + postfix +
            'color_fore.tif', color_image)

        tex_coor = np.asanyarray(points.get_texture_coordinates_EXT())
        imsave(
            prefix + str(serial_numbers[camNo]) + '_' + postfix +
            'texture_fore.tif', tex_coor)
Example #5
    def get_frame(self):
        success, frames = self.pipe.try_wait_for_frames()

        if not success:
            return None

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        w = rs.video_frame(depth_frame).width
        h = rs.video_frame(depth_frame).height

        #         if self.ICP:
        #             self.save_frame_to_img(depth_frame, color_frame)

        pc = rs.pointcloud()
        points = pc.calculate(depth_frame)

        vertices = np.asanyarray(points.get_vertices()).view(
            np.float32).reshape(h, w, 3)
        vertices = self.preprocess_point_cloud(vertices)

        self.frame_counter += 1

        if self.ICP:
            return points, color_frame

        return vertices
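`preprocess_point_cloud` is not shown in this listing. A minimal sketch of what such a step commonly does, written here as a free function and assuming it drops zero-depth points and clips far returns (the 3 m threshold is a placeholder):

import numpy as np

def preprocess_point_cloud(vertices, max_depth=3.0):
    # Flatten the (h, w, 3) vertex map and keep only valid, near points
    pts = vertices.reshape(-1, 3)
    mask = (pts[:, 2] > 0) & (pts[:, 2] < max_depth)
    return pts[mask]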
Example #6
def get_pointcloud(depth_image, color_frame, img, img_size):
    point_cloud = rs.pointcloud()
    points = rs.points()

    # Obtain point cloud data
    point_cloud.map_to(color_frame)
    points = point_cloud.calculate(depth_image)

    # Convert point cloud to 2d Array
    points3d = np.asanyarray(points.get_vertices())
    points3d = points3d.view(np.float32).reshape(points3d.shape + (-1, ))
    texture_coords = np.asanyarray(points.get_texture_coordinates())
    texture_coords = texture_coords.view(
        np.float32).reshape(texture_coords.shape + (-1, ))

    # Remove all invalid data within a certain distance
    long_distance_mask = points3d[:, 2] < 10
    short_distance_mask = points3d[:, 2] > 0.3
    distance_mask = np.logical_and(long_distance_mask, short_distance_mask)
    points3d = points3d[distance_mask]

    # Sample random points
    idx = np.random.randint(points3d.shape[0],
                            size=round(points3d.shape[0] / 100))
    sampled_points = points3d[idx, :]

    # Colours are not actually sampled here; point_colors is returned empty
    # (see the sketch below for one way to fill it from texture_coords)
    point_colors = np.array([])

    return sampled_points, point_colors
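`point_colors` above is returned empty even though texture coordinates are computed. A hedged sketch of how those coordinates could be used to sample per-point colours from the colour image (the masking mirrors the distance filter above; `color_image` would come from `np.asanyarray(color_frame.get_data())`):

import numpy as np

def sample_point_colors(texture_coords, color_image, distance_mask):
    h, w = color_image.shape[:2]
    uv = texture_coords[distance_mask]
    # Normalized (u, v) -> pixel indices, clipped to the image bounds
    cols = np.clip((uv[:, 0] * w + 0.5).astype(int), 0, w - 1)
    rows = np.clip((uv[:, 1] * h + 0.5).astype(int), 0, h - 1)
    return color_image[rows, cols]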
Example #7
    def frames_to_adapted_point_cloud(
            self: 'Camera',
            frames: List[rs2.depth_frame],
            filter_predicate=lambda p: False) -> np.ndarray:
        """
        Generates a point cloud and adapts it (see generate_adapted_point_cloud) from a list of frame,
        applying filters on the frames to get one frame and calculating the point cloud.
        :param frames: A list of frames to calculate the point cloud from
        :param filter_predicate:A predicate to indicate whether to remove a point or not (if True, it is removed)
        :return: The point cloud generated after all the calculations
        """
        frame = self.apply_filters(frames)
        pc: rs2.pointcloud = rs2.pointcloud()
        points: np.ndarray = np.array(pc.calculate(frame).get_vertices())

        # Convert to viable np.ndarray, and also filter 'zero' points and accounts for deviation
        points = np.array([(x + self.dx, y + self.dy, z + self.dz)
                           for (x, y, z) in points if not x == y == z == 0])

        # Invert x, y axis and mirror the z axis around the distance line
        utils.change_coordinates_inplace(
            points, lambda p: (-p[0], -p[1], self.distance - p[2]))

        from math import atan2, degrees
        # Filter by bearing in the x-z plane; the +/-60 threshold is assumed to
        # be in degrees, and atan2 avoids division by zero when p[0] == 0
        points = utils.filter_points(
            points, lambda p: not -60 < degrees(atan2(p[2], p[0])) < 60)
        # points = utils.filter_points(points, filter_predicate)
        utils.rotate_point_cloud_inplace(points, self.angle)
        return points
Example #8
    def __init__(self, pipeline):
        self.pipe = pipeline
        self.scale = 1
        self.w = 640
        self.h = 480
        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        self.decimate.set_option(rs.option.filter_magnitude, 2**state.decimate)
        self.colorizer = rs.colorizer()
        self.state = state  # module-level ImageHandler model state
        self.out = out  # module-level image-handler output buffer

        # Data from camera
        self.image = None
        self.verts = None

        # Setup camera
        try:
            # Get stream profile and camera intrinsics
            profile = pipeline.get_active_profile()
            depth_profile = rs.video_stream_profile(
                profile.get_stream(rs.stream.depth))
            self.depth_intrinsics = depth_profile.get_intrinsics()
            self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height
            depth_sensor = profile.get_device().first_depth_sensor()
            self.scale = depth_sensor.get_depth_scale()
        except RuntimeError as err:
            print(err)
Example #9
    def __init__(self, color, ip, port, topic, interval):
        Publisher.__init__(self, ip, port, topic)
        self.interval = interval
        self.lastUpdate = self.millis(self)

        pipe = rs.pipeline()
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        cfg.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        profile = pipe.start(cfg)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)

        self.running = True
        self.color = color

        zero_vec = (0.0, 0.0, 0.0)
        self.depth_frame = zero_vec
        self.color_frame = zero_vec
        self.depth_colormap = zero_vec

        # Processing blocks
        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        #self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.decimate)
        self.colorizer = rs.colorizer()
Example #10
   def __init__(self, resolution, frame_rate, camera_settings=None):
       """
       The constructor for the camera wrapper class.

       Parameters:
           resolution ([int, int]) : desired resolution of the camera.
           frame_rate (int) : camera frame rate.
           camera_settings (dict) : dictionary of custom parameters.
       """
       assert resolution in [[1280, 720],
                             [848, 480],
                             [640, 480],
                             [640, 360],
                             [480, 270],
                             [424, 240]]
       self.pipeline = rs.pipeline()
       self.pc = rs.pointcloud()
       config = rs.config()
       config.enable_stream(rs.stream.depth, resolution[0], resolution[1],
                            rs.format.z16, frame_rate)
       config.enable_stream(rs.stream.color, resolution[0], resolution[1],
                            rs.format.bgr8, frame_rate)
       self.pipeline.start(config)
       dev = find_device_that_supports_advanced_mode()
       self.advnc_mode = rs.rs400_advanced_mode(dev)
Example #11
def create_point_cloud(key_list, resolution_height, resolution_width,
                       device_manager, coordinate_dimensions,
                       transformation_devices):
    pcs = {}
    for camera in key_list:
        pcs[camera] = rs.pointcloud()
    pixels = resolution_width * resolution_height
    total_pixels = pixels * len(key_list)
    cloud = np.zeros((3, total_pixels))
    transformed_pixels = np.ones((4, pixels))

    while True:
        start = time.time()
        the_frames = device_manager.poll_frames()

        for idx, camera in enumerate(key_list):
            pc = pcs[camera]
            frame = the_frames[camera][rs.stream.depth]
            points = pc.calculate(frame)
            vert = np.transpose(np.asanyarray(points.get_vertices(2)))
            transformed_pixels[:coordinate_dimensions, :] = vert
            calibrated_points = np.matmul(
                transformation_devices[camera].pose_mat, transformed_pixels)
            cloud[:, pixels * idx:pixels *
                  (idx + 1)] = calibrated_points[:coordinate_dimensions, :]

        print(1 / (time.time() - start))  # report the merge rate in Hz
Example #12
    def initialize(self):
        print("Initialize")

        # realsense configuration
        try:
            config = rs.config()
            config.enable_device(self.params["device_serial"])
            config.enable_stream(rs.stream.depth, self.width, self.height,
                                 rs.format.z16, 30)
            config.enable_stream(rs.stream.color, self.width, self.height,
                                 rs.format.bgr8, 30)

            self.pointcloud = rs.pointcloud()
            self.pipeline = rs.pipeline()
            cfg = self.pipeline.start(config)
            # profile = cfg.get_stream(rs.stream.color)  # fetch the color stream profile
            # intr = profile.as_video_stream_profile().get_intrinsics()  # downcast and read intrinsics
            # print(intr.fx, intr.fy)
            # depth_scale = cfg.get_device().first_depth_sensor().get_depth_scale()
            # print("Depth Scale is:", depth_scale)
            # sys.exit(-1)
        except Exception as e:
            print("Error initializing camera")
            print(e)
            sys.exit(-1)
Example #13
    def init_realsense(self):
        # Configure depth and color streams
        self.pipeline = rs.pipeline()
        config = rs.config()

        # Get device product line for setting a supporting resolution
        pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
        pipeline_profile = config.resolve(pipeline_wrapper)
        device = pipeline_profile.get_device()
        device_product_line = str(device.get_info(rs.camera_info.product_line))

        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                             self.fps)

        # The color configuration is the same for every product line (including
        # 'L500'), so a single enable_stream call suffices
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8,
                             self.fps)

        self.pc = rs.pointcloud()

        # Start streaming
        self.profile = self.pipeline.start(config)
        depth_sensor = self.profile.get_device().first_depth_sensor()
        self.depth_scale = depth_sensor.get_depth_scale()
        align_to = rs.stream.color
        self.align = rs.align(align_to)
Example #14
    def getPointsArray(self):

        #Distance at which a pixel is ignored
        clipping_distance = self.metersClippingDistance / self.depthSensor.get_depth_scale()

        pointCloud = rs.pointcloud()
        points = rs.points()

        # Get frameset of color and depth
        frames = self.pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x360 depth image

        # Align the depth frame to color frame
        alignedFrames = self.aligner.process(frames)

        # Get aligned frames
        alignedDepthFrame = alignedFrames.get_depth_frame()  # a 640x480 depth image
        colorFrame = alignedFrames.get_color_frame()

        depth_image = np.asanyarray(alignedDepthFrame.get_data())
        color_image = np.asanyarray(colorFrame.get_data())

        depth_image = cv2.GaussianBlur(depth_image, (5, 5), 9)  # smooth the depth image (frame objects cannot be blurred directly)

        depth_image_3d = np.dstack((depth_image, depth_image, depth_image))
Example #15
def scan():

    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = pyrs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = pyrs.points()
    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = pyrs.pipeline()
    #Start streaming with default recommended configuration
    profile = pipe.start()
    # To figure out the depth scale (normally 0.001):
    # depth_sensor = profile.get_device().first_depth_sensor()
    # print(depth_sensor.get_depth_scale())

    try:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        # Fetch depth frames
        depth = frames.get_depth_frame()
        # calculate point cloud
        points = pc.calculate(depth)
        # get vertices
        pts = np.asanyarray(points.get_vertices())
        #return the list of vertices to rhino
        return pts

    finally:
        pipe.stop()
Example #16
def scan_points(depth_frame, d_height, thresh=0.2):
    pc = rs.pointcloud()
    points = pc.calculate(depth_frame)
    og_verts = points.get_vertices()
    print("og_verts: ", og_verts)

    verts = np.asanyarray(og_verts)

    print("verts: ", verts)
    print("obj_type: ", np.dtype(verts[0]))
    print("verts shape: ", verts.shape)

    print("test_pr: ", verts[['f0', 'f1', 'f2']])
    x = verts['f0'].astype(np.float32)
    y = verts['f1'].astype(np.float32)
    z = verts['f2'].astype(np.float32)

    print("x: ", x)

    full = np.column_stack((x, y, z))
    full = full.astype(np.float32)

    print("full: ", full)
    print("full shape: ", np.shape(full))

    #get only points that are around a certain height
    low_cond = full[:, 1] - d_height >= -thresh
    high_cond = full[:, 1] - d_height <= thresh

    truth_cond = np.logical_and(low_cond, high_cond)
    return full[truth_cond]
Example #17
    def take_point_cloud(self, file_name):
        rs_pipeline = self.init_pipeline_(None, True, True)

        color_frame = None
        depth_frame = None

        # Take first 3 photos to allow camera to initialize
        for i in range(0, 3):
            try:
                frames = rs_pipeline.wait_for_frames()
                color_frame = frames.get_color_frame()
                depth_frame = frames.get_depth_frame()
            except Exception as e:
                rs_pipeline.stop()
                raise RuntimeError("Error while capturing from " + self.camera_serial_number_ + ": " + str(e))

        if color_frame is None or depth_frame is None:
            rs_pipeline.stop()
            raise RuntimeError("Unable to capture frames from Realsense camera " + self.camera_serial_number_)

        # Process point cloud
        pc = rs.pointcloud()
        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)
        points.export_to_ply(file_name, color_frame)

        rs_pipeline.stop()
Example #18
def set_pointcloud_variable():
    pc = rs.pointcloud()
    decimate = rs.decimation_filter()
    decimate.set_option(rs.option.filter_magnitude, 2**1)
    colorizer = rs.colorizer()

    return pc, decimate, colorizer
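A hedged usage sketch for the three blocks returned above, assuming an already-started `pipeline` (everything apart from the pyrealsense2 calls is a placeholder):

pc, decimate, colorizer = set_pointcloud_variable()

frames = pipeline.wait_for_frames()
depth_frame = decimate.process(frames.get_depth_frame())  # downsampled by the magnitude-2 filter
points = pc.calculate(depth_frame)                        # point cloud from the decimated depth
depth_vis = np.asanyarray(colorizer.colorize(depth_frame).get_data())  # colorized depth for display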
Example #19
def get_object_points(color_frame, depth_frame):
    """Short summary.

    Parameters
    ----------
    color_frame : type
        Description of parameter `color_frame`.
    depth_frame : type
        Description of parameter `depth_frame`.

    Returns
    -------
    type
        Description of returned object.

    """
    pc = rs.pointcloud()
    pc.map_to(color_frame)
    pointcloud = pc.calculate(depth_frame)
    v = pointcloud.get_vertices()
    verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)
    cloud = pcl.PointCloud(verts)
    filtered_cloud = do_passthrough_filter(point_cloud=cloud,
                                           name_axis='z',
                                           min_axis=0.0,
                                           max_axis=0.6)
    downsampled_cloud = do_voxel_grid_filter(point_cloud=filtered_cloud,
                                             LEAF_SIZE=0.004)

    table_cloud, objects_cloud = do_ransac_plane_segmentation(
        downsampled_cloud, max_distance=0.01)
    # project_inliers = objects_cloud.make_ProjectInliers()
    # project_inliers.set_model_type(pcl.SACMODEL_NORMAL_PARALLEL_PLANE)
    # plane = project_inliers.filter()
    colorless_cloud = XYZRGB_to_XYZ(objects_cloud)
    clusters = get_clusters(objects_cloud,
                            tolerance=0.02,
                            min_size=100,
                            max_size=25000)

    colored_points = get_cloud_clusters(clusters, colorless_cloud)
    # Create a cloud with each cluster of points having the same color
    clusters_cloud = pcl.PointCloud()
    clusters_cloud.from_list(colored_points)
    color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
    depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(
        color_frame.profile)
    Pixel_Coord = []
    for data in clusters_cloud:
        if np.round(data[2], 2) > 0.1:
            color_point = rs.rs2_transform_point_to_point(
                depth_to_color_extrin, list(data))
            Pixel_Coord.append(
                rs.rs2_project_point_to_pixel(color_intrin, color_point))
    # pcl.save(downsampled_cloud, "12.ply")
    # if clusters_cloud.size > 0:
    #     pcl.save(clusters_cloud, "13.ply")
    return Pixel_Coord
Example #20
def camera_detect(predictor, serials, item_metadata):
    items_dicts = []

    # Per-camera crop regions: [[row range], [column range]] for each camera
    cams_cut = [[[300, 650], [240, 880]], [[140, 720], [90, 980]]]

    for camera_num, serial in enumerate(serials):
        # Set the Camera
        # Configure depth and color streams
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device(serial)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)
        # Start streaming
        pipeline.start(config)

        align = rs.align(rs.stream.color)
        pc = rs.pointcloud()

        for i in range(20):
            # print('Camera warming up, wait a second!')
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            frames = align.process(frames)
            depth = frames.get_depth_frame()
            color = frames.get_color_frame()
        print('Camera warm-up complete.')

        print(f'---------- Camera {camera_num+1} ----------')
        frames = pipeline.wait_for_frames()
        frames = align.process(frames)

        depth = frames.get_depth_frame()
        color = frames.get_color_frame()

        img_color = np.asanyarray(color.get_data())
        # img_depth = np.asanyarray(depth.get_data())
        pc.map_to(color)
        points = pc.calculate(depth)
        vtx = np.asanyarray(points.get_vertices())
        vtx = np.reshape(vtx, (720, 1280, -1))

        cam_cut = cams_cut[camera_num]
        img_cut = img_color[cam_cut[0][0]:cam_cut[0][1],
                            cam_cut[1][0]:cam_cut[1][1], :]
        vtx = vtx[cam_cut[0][0]:cam_cut[0][1], cam_cut[1][0]:cam_cut[1][1], :]
        target_items_dict = detect_items_in_image(img_cut, predictor,
                                                  item_metadata)
        items_dict = save_items_point_cloud(vtx, img_cut[:, :, [2, 1, 0]],
                                            target_items_dict, camera_num)
        items_dicts.append(items_dict)
        # print(points[:3])

    print(f"lenth:{len(items_dicts)}")

    return items_dicts
Example #21
def visualise_point_cloud(key_list, resolution_height, resolution_width,
                          device_manager, coordinate_dimensions,
                          transformation_devices):
    # enable visualiser
    pcd = o3d.geometry.PointCloud()
    vis = o3d.visualization.Visualizer()
    vis.create_window()
    first_image = True

    pcs = {}
    for camera in key_list:
        pcs[camera] = rs.pointcloud()
    pixels = resolution_width * resolution_height
    total_pixels = pixels * len(key_list)
    cloud = np.zeros((3, total_pixels))
    transformed_pixels = np.ones((4, pixels))
    idxe = np.random.permutation(cloud.shape[1])

    while True:
        start = time.time()
        the_frames = device_manager.poll_frames()

        for idx, camera in enumerate(key_list):
            pc = pcs[camera]
            frame = the_frames[camera][rs.stream.depth]
            points = pc.calculate(frame)
            pc.map_to(frame)  # map_to returns None; no need to keep the result
            vert = np.transpose(np.asanyarray(points.get_vertices(2)))
            transformed_pixels[:coordinate_dimensions, :] = vert
            calibrated_points = np.matmul(
                transformation_devices[camera].pose_mat, transformed_pixels)
            cloud[:, pixels * idx:pixels *
                  (idx + 1)] = calibrated_points[:coordinate_dimensions, :]

        # Reduces rendered points and removes points with extreme z values
        keep_ratio = 0.005
        cloud_filtered = cloud[:,
                               idxe[0:math.floor(cloud.shape[1] * keep_ratio)]]
        #cloud_filtered = cloud_filtered - np.min(cloud_filtered[2, :])
        dist_thresh = 3
        cloud_filtered = -cloud_filtered[:, cloud_filtered[2, :] < dist_thresh]
        #cloud_filtered = cloud_filtered[:, cloud_filtered[2, :] > -1]
        #cloud_filtered = cloud_filtered[:, np.invert(np.any(cloud_filtered > dist_thresh, axis=0))]

        # renders points from all different cameras
        # mlab.points3d( cloud_filtered[0, :],  cloud_filtered[1, :],  cloud_filtered[2, :], scale_factor=0.1)
        # mlab.show()

        pcd.points = o3d.utility.Vector3dVector(np.transpose(cloud_filtered))

        if first_image:
            vis.add_geometry(pcd)
            first_image = False
        vis.update_geometry(pcd)  # newer Open3D releases require the geometry argument
        vis.poll_events()
        vis.update_renderer()
Example #22
def camera_detect(model, serials, cams_cut):
    item_dicts = []

    for camera_num, serial in enumerate(serials):
        # Set the Camera
        # Configure depth and color streams
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_device(serial)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)
        # Start streaming
        pipeline.start(config)

        align = rs.align(rs.stream.color)
        pc = rs.pointcloud()

        if camera_num == 1:
            for i in range(20):
                # print('Camera warming up, wait a second!')
                # Wait for a coherent pair of frames: depth and color
                frames = pipeline.wait_for_frames()
                frames = align.process(frames)
                depth = frames.get_depth_frame()
                color = frames.get_color_frame()
            print('Camera 2 warm-up complete.')

        for i in range(1):
            print(f'---------- Photo {i+1} ----------')
            frames = pipeline.wait_for_frames()
            frames = align.process(frames)

            depth = frames.get_depth_frame()
            color = frames.get_color_frame()

            img_color = np.asanyarray(color.get_data())
            # img_depth = np.asanyarray(depth.get_data())
            pc.map_to(color)
            points = pc.calculate(depth)
            vtx = np.asanyarray(points.get_vertices())
            vtx = np.reshape(vtx, (720, 1280, -1))

            img_color = img_color[
                cams_cut[camera_num][0][0]:cams_cut[camera_num][0][1],
                cams_cut[camera_num][1][0]:cams_cut[camera_num][1][1], :]

            vtx = vtx[cams_cut[camera_num][0][0]:cams_cut[camera_num][0][1],
                      cams_cut[camera_num][1][0]:cams_cut[camera_num][1][1], :]

            target_items_dict = detect_items_in_image(img_color, model)
            # Swap img_color's channel order (BGR -> RGB) for correct display
            item_dict = save_items_point_cloud(vtx, img_color[:, :, [2, 1, 0]],
                                               target_items_dict, camera_num)
            item_dicts.append(item_dict)
        # print(points[:3])

    return item_dicts[0], item_dicts[1]
Example #23
    def visualize(self):

        # INFO: this function renders the four cameras' point clouds into a single image
        captures = list()
        pc = rs.pointcloud()
        # 1) get point cloud from four cameras
        total_verts = list()
        total_cam_id = list()
        total_texcoords = list()
        total_colors = list()
        for i in range(4):
            cam_name = self.cam_names[i]
            verts, texcoords, depth, color = self.cams[cam_name][
                'verts'], self.cams[cam_name]['texcoords'], self.cams[
                    cam_name]['depth'], self.cams[cam_name]['color']
            total_verts.append(verts)
            # verts.shape = (320 * 240,3)
            total_cam_id.append(i * np.ones((verts.shape[0]), dtype=np.int32))
            # shape = (320 * 240)
            total_texcoords.append(texcoords)
            # texcoords.shape = (320 * 240, 2)
            total_colors.append(color)
            # color.shape = (480, 640, 3)
        total_verts = np.concatenate(total_verts, axis=0)
        # total_verts.shape = (320 * 240, 3)
        total_cam_id = np.concatenate(total_cam_id, axis=0)
        # total_cam_id.shape = (320 * 240)
        total_texcoords = np.concatenate(total_texcoords, axis=0)
        # total_texcoords.shape = (320 * 240, 2)
        total_colors = np.stack(total_colors, axis=0)
        # total_colors.shape = (4, 480, 640, 3)
        # 2) project point cloud from four physical cameras to the virtual camera
        v = self.view(total_verts, total_cam_id)
        s = v[:, 2].argsort()[::-1]
        # sort coordinates by z value in descending order
        w, h = self.size(channel='depth')
        proj = self.project((w, h), v[s])
        # proj.shape = (h * w, 2)
        j, i = proj.astype(np.uint32).T
        # get u, v of homogeneous coordinate, shape = (h * w)
        total_cam_id = total_cam_id[s]
        # reorder cam id with the sorted sequence, shape = (h * w)
        # mask of visible voxel in image area
        im = (i >= 0) & (i < h)
        jm = (j >= 0) & (j < w)
        m = im & jm
        cw, ch = self.size(channel='color')
        # turn the texcoordinate into absolute coordinate
        v, u = (total_texcoords[s] * (cw, ch) + 0.5).astype(np.uint32).T
        # clip texcoordinate within captured image area
        np.clip(u, 0, ch - 1, out=u)
        np.clip(v, 0, cw - 1, out=v)
        # output
        out = np.empty((h, w, 3), dtype=np.uint8)
        out[i[m], j[m]] = total_colors[total_cam_id[m], u[m], v[m]]
        return out
Example #24
    def loop(q, log_level):
        logging.basicConfig(level=log_level)
        logging.debug("Camera process started")
        ctx = rs.context()
        devices = ctx.query_devices()
        if len(devices) == 0:
            raise NoDeviceDetectedException()
        for dev in devices:
            dev.hardware_reset()
        time.sleep(5)

        # Configure depth and color streams
        pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        # Start streaming
        pipeline.start(config)

        # Get stream profile and camera intrinsics
        profile = pipeline.get_active_profile()
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        intrinsics = depth_profile.get_intrinsics()

        # Point cloud acquisition
        pc = rs.pointcloud()
        colorizer = rs.colorizer()
        aligner = rs.align(rs.stream.color)

        while True:
            frames = aligner.process(pipeline.wait_for_frames())
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if not depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            q.put(
                (
                    color_image,
                    depth_image,
                    (
                        intrinsics.width,
                        intrinsics.height,
                        intrinsics.fx,
                        intrinsics.fy,
                        intrinsics.ppx,
                        intrinsics.ppy,
                    ),
                )
            )
            time.sleep(0.1)
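On the consuming side, a minimal sketch of reading back the tuples that `loop` puts on `q` (the wiring is an assumption; `loop` takes no `self`, so it is treated as a staticmethod here):

import logging
import multiprocessing as mp

q = mp.Queue()
camera_proc = mp.Process(target=loop, args=(q, logging.INFO), daemon=True)
camera_proc.start()

# Matches the tuple structure put on the queue above
color_image, depth_image, (w, h, fx, fy, ppx, ppy) = q.get()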
Example #25
    def __init__(self, checkpoints_path, w=640, h=480):
        self.openpose = OpenposeLight(checkpoints_path)

        self.pipeline = rs.pipeline()

        self.pc = rs.pointcloud()

        self.align = rs.align(rs.stream.color)

        self.init_realsense(w, h)
Example #26
def xyz(depth):
    pc = rs.pointcloud()
    points = pc.calculate(depth)
    pts = np.asanyarray(points.get_vertices())
    ap = [*zip(*pts)]
    x = ap[0]
    y = ap[1]
    z = ap[2]

    return x, y, z
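A quick usage note for `xyz`: the three returned tuples can be plotted directly, for example with matplotlib (the plotting code is illustrative and not part of the example):

import matplotlib.pyplot as plt

x, y, z = xyz(depth)  # depth: an rs.depth_frame
ax = plt.figure().add_subplot(111, projection='3d')
ax.scatter(x[::100], y[::100], z[::100], s=1)  # heavy subsampling keeps rendering fast
plt.show()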
Example #27
def main(args, item_metadata):
    # input('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    # Load model
    # model = MaskRCNN()

    # Set the Camera
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)
    # Start streaming
    pipeline.start(config)

    align = rs.align(rs.stream.color)
    pc = rs.pointcloud()

    for i in range(20):
        # print('Camera warming up, wait a second!')
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        frames = align.process(frames)
        depth = frames.get_depth_frame()
        color = frames.get_color_frame()
    print('Camera warm-up complete.')

    for i in range(100):
        print(f'---------- Photo {i+1} ----------')
        frames = pipeline.wait_for_frames()
        frames = align.process(frames)

        depth = frames.get_depth_frame()
        color = frames.get_color_frame()

        img_color = np.asanyarray(color.get_data())
        # img_depth = np.asanyarray(depth.get_data())
        pc.map_to(color)
        points = pc.calculate(depth)
        vtx = np.asanyarray(points.get_vertices())
        vtx = np.reshape(vtx, (720, 1280, -1))
        cam_cut = [[330, 700], [200, 980]]

        # get 3d point cloud (masked)
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        rgb_image = np.asanyarray(color_frame.get_data())
        rgb_image[:, :, [0, 2]] = rgb_image[:, :, [2, 0]]
        img_cut = img_color[cam_cut[0][0]:cam_cut[0][1],
                            cam_cut[1][0]:cam_cut[1][1], :]
        vtx = vtx[cam_cut[0][0]:cam_cut[0][1], cam_cut[1][0]:cam_cut[1][1], :]
        # rgb_image_cut = rgb_image[cam_cut[0][0]:cam_cut[0][1], cam_cut[1][0]:cam_cut[1][1], :]
        target_items_dict = detect_items_in_image(img_cut, args, item_metadata)
        save_objects_point_cloud(vtx, img_cut[:, :, [2, 1, 0]],
                                 target_items_dict)
Example #28
    def __init__(self, profile):
        depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
        self.depth_intrinsics = depth_profile.get_intrinsics()
        self.w, self.h = self.depth_intrinsics.width, self.depth_intrinsics.height

        self.state = PointCloudState()
        self.out = np.empty((self.h, self.w, 3), dtype=np.uint8)

        self.pc = rs.pointcloud()
        self.decimate = rs.decimation_filter()
        self.decimate.set_option(rs.option.filter_magnitude, 2 ** self.state.decimate)
Example #29
def main():
    # input('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
    # Load model
    model = MaskRCNN()

    # Set the Camera
    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)
    # Start streaming
    pipeline.start(config)

    align = rs.align(rs.stream.color)
    pc = rs.pointcloud()

    for i in range(20):
        # print('Camera warming up, wait a second!')
        # Wait for a coherent pair of frames: depth and color
        frames = pipeline.wait_for_frames()
        frames = align.process(frames)
        depth = frames.get_depth_frame()
        color = frames.get_color_frame()
    print('Camera warm-up complete.')

    for i in range(100):
        print(f'---------- Photo {i+1} ----------')
        frames = pipeline.wait_for_frames()
        frames = align.process(frames)

        depth = frames.get_depth_frame()
        color = frames.get_color_frame()

        img_color = np.asanyarray(color.get_data())
        # img_depth = np.asanyarray(depth.get_data())
        pc.map_to(color)
        points = pc.calculate(depth)
        vtx = np.asanyarray(points.get_vertices())
        vtx = np.reshape(vtx, (720, 1280, -1))

        # get 3d point cloud (masked)
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        rgb_image = np.asanyarray(color_frame.get_data())
        rgb_image[:, :, [0, 2]] = rgb_image[:, :, [2, 0]]
        # print(rgb_image.shape)
        # plt.imshow(rgb_image)
        # plt.pause(1)  # pause 1 second
        # plt.clf()
        target_objects_dict = detect_objects_in_image(img_color, model)

        save_objects_point_cloud(vtx, rgb_image, target_objects_dict)
Example #30
    def update_poly_data(self, obj=None, event=None):
        cam_counter = 0
        rgb_cam_images = []

        for pipe in pipelines:
            frame_set = pipe.wait_for_frames()
            # Wait for a coherent color frame
            # frames = real_sense_cam.get_pipeline().wait_for_frames()
            # Align the depth frame to color frame
            aligned_frames = self.align.process(frame_set)

            depth_frame = aligned_frames.get_depth_frame()
            color_frame = aligned_frames.get_color_frame()

            pc = rs.pointcloud()

            point_cloud = pc.calculate(depth_frame)
            pc.map_to(color_frame)
            v, t = point_cloud.get_vertices(), point_cloud.get_texture_coordinates()
            vertices = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz

            color_image = np.asanyarray(color_frame.get_data())
            rgb_cam_images.append(np_color_img_to_q_image(color_image))
            color_image = color_image.reshape((color_image.shape[0] * color_image.shape[1], 3))

            colors = vtk.vtkUnsignedCharArray()
            colors.SetNumberOfComponents(3)

            colors.SetArray(vtk_np.numpy_to_vtk(color_image), color_image.shape[0] * color_image.shape[1], 1)
            self.pd_collection[cam_counter].GetPointData().SetScalars(colors)

            points = vtk.vtkPoints()
            cells = vtk.vtkCellArray()

            points.SetData(vtk_np.numpy_to_vtk(vertices))
            cells_npy = np.vstack([np.ones(self._n_coordinates, dtype=np.int64),
                                   np.arange(self._n_coordinates, dtype=np.int64)]).T.flatten()
            cells.SetCells(self._n_coordinates, vtk_np.numpy_to_vtkIdTypeArray(cells_npy))

            self.pd_collection[cam_counter].SetPoints(points)
            self.pd_collection[cam_counter].SetVerts(cells)
            self.pd_collection[cam_counter].Modified()
            cam_counter += 1

        self.frames_ready.emit(rgb_cam_images[0],
                               rgb_cam_images[1],
                               rgb_cam_images[2])

        self.__iren.GetRenderWindow().Render()
        # print(self.timer_count)
        self.timer_count += 1
Example #31
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.

#####################################################
##                  Export to PLY                  ##
#####################################################

# First import the library
import pyrealsense2 as rs


# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()

# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
# Start streaming with the default recommended configuration
pipe.start()

try:
    # Wait for the next set of frames from the camera
    frames = pipe.wait_for_frames()

    # Fetch color and depth frames
    depth = frames.get_depth_frame()
    color = frames.get_color_frame()

    # Tell pointcloud object to map to this color frame
    pc.map_to(color)
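    # -- The listing is truncated at this point. Judging by the identical
    # -- try/finally pattern in Example #15, it most likely continues along
    # -- these lines (the output filename is a placeholder):
    points = pc.calculate(depth)
    points.export_to_ply("out.ply", color)
    print("Point cloud saved to out.ply")

finally:
    pipe.stop()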