Example No. 1
def capture(pipelines, serial_numbers, prefix, postfix):

    pc = rs.pointcloud()
    points = rs.points()
    for camNo, pipe in enumerate(pipelines):
        frames = pipe.wait_for_frames()

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        pc.map_to(color_frame)
        points = pc.calculate(depth_frame)
        points.export_to_ply(
            prefix + str(serial_numbers[camNo]) + '_' + postfix + '.ply',
            color_frame)

        print('Pointcloud saved for camera with serial number',
              serial_numbers[camNo])

        color_image = np.asanyarray(color_frame.get_data())
        imsave(
            prefix + str(serial_numbers[camNo]) + '_' + postfix +
            'color_fore.tif', color_image)

        tex_coor = np.asanyarray(points.get_texture_coordinates_EXT())
        imsave(
            prefix + str(serial_numbers[camNo]) + '_' + postfix +
            'texture_fore.tif', tex_coor)
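
A minimal sketch of how capture() might be driven: one pipeline per connected device. The stream settings and the prefix/postfix values below are illustrative assumptions, not part of the original script.

import pyrealsense2 as rs

ctx = rs.context()
serial_numbers = [dev.get_info(rs.camera_info.serial_number)
                  for dev in ctx.query_devices()]

pipelines = []
for serial in serial_numbers:
    cfg = rs.config()
    cfg.enable_device(serial)
    cfg.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    cfg.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    pipe = rs.pipeline(ctx)
    pipe.start(cfg)
    pipelines.append(pipe)

capture(pipelines, serial_numbers, prefix='./scans/', postfix='frame0')
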
Example No. 2
def scan():

    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = pyrs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = pyrs.points()
    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = pyrs.pipeline()
    #Start streaming with default recommended configuration
    profile = pipe.start()
    # to figure out the depth scale, normally 0.001
    #depth_sensor = profile.get_device().first_depth_sensor()
    #print depth_sensor.get_depth_scale()

    try:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        # Fetch depth frames
        depth = frames.get_depth_frame()
        # calculate point cloud
        points = pc.calculate(depth)
        # get vertices
        pts = np.asanyarray(points.get_vertices())
        #return the list of vertices to rhino
        return pts

    finally:
        pipe.stop()
Example No. 3
    def __init__(self):
        # camera matrix of realsense
        self.camera_info = CameraInfo()
        # camera_info.K = [616.3787841796875, 0.0, 434.0303955078125, 0.0, 616.4257202148438, 234.33065795898438, 0.0, 0.0, 1.0]
        self.camera_info.K = [
            931.6937866210938, 0.0, 624.7894897460938, 0.0, 931.462890625,
            360.5186767578125, 0.0, 0.0, 1.0
        ]
        self.camera_info.header.frame_id = 'camera_color_optical_frame'
        self.camera_info.height = 720
        self.camera_info.width = 1280
        self.image_size = [640, 480]
        self.points = rs.points()
        self.pipeline = rs.pipeline()
        self.config = rs.config()
        self.config.enable_stream(rs.stream.color, self.image_size[0],
                                  self.image_size[1], rs.format.bgr8, 30)
        self.config.enable_stream(rs.stream.depth, self.image_size[0],
                                  self.image_size[1], rs.format.z16, 30)
        self.profile = self.pipeline.start(self.config)
        self.align_to = rs.stream.color
        self.align = rs.align(self.align_to)

        self.board_center_x = 0
        self.board_center_y = 0

        self.board = [[[0, 0], [0, 0], [0, 0]], [[0, 0], [0, 0], [0, 0]],
                      [[0, 0], [0, 0], [0, 0]]]

        self.h_choice = ''

        self.R = [0, 0, 255]
        self.G = [0, 255, 0]
        self.B = [255, 0, 0]
Example No. 4
def capture_RealSense(name):
    points = rs.points()

    success, frames = pipeline.try_wait_for_frames(timeout_ms=10)
    if not success:
        return

    depth_frame = frames.get_depth_frame()
    other_frame = frames.first(other_stream)
    color = frames.get_color_frame()
    
    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    points = pc.calculate(depth_frame)

    pc.map_to(other_frame)

    ply = name + ".ply"
    pcd = name + ".pcd"
    points.export_to_ply(ply, color)
    clouding = pcl.load(ply)
    pcl.save(clouding, pcd)
    print("Export Reussi")
Example No. 5
    def __init__(self, camera_configuration_file_path):
        self._cfg = yaml.load(open(camera_configuration_file_path, 'r'),
                              Loader=yaml.FullLoader)
        self.width = self._cfg["width"]
        self.height = self._cfg["height"]
        self.fps = self._cfg["fps"]
        self.serial_number = self._cfg["serial_number"]

        self.points = rs.points()
        self.pipeline = rs.pipeline()
        config = rs.config()
        if self.serial_number != '' and self.serial_number is not None:
            print('Config for realsense %s' % self.serial_number)
            config.enable_device(self.serial_number)

        # the resolution of color image can be higher than depth image
        config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8,
                             self.fps)
        config.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8,
                             self.fps)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                             self.fps)
        config.enable_stream(rs.stream.color, self.width, self.height,
                             rs.format.bgr8, self.fps)

        self.profile = self.pipeline.start(config)
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        self.scale = self.get_depth_scale()
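
The class above calls self.get_depth_scale(), which is not shown in this snippet. A plausible implementation, following the depth-scale queries used elsewhere in these examples, would be:

    def get_depth_scale(self):
        # Query the depth sensor for its depth scale (typically 0.001 m per unit)
        depth_sensor = self.profile.get_device().first_depth_sensor()
        return depth_sensor.get_depth_scale()
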
Example No. 6
    def __init__(self, serial_id='', width=1280, height=720, fps=30):
        self.width = width
        self.height = height
        self.fps = fps

        self.points = rs.points()
        self.pipeline = rs.pipeline()
        config = rs.config()
        if serial_id != '':
            config.enable_device(serial=serial_id)
        config.enable_stream(rs.stream.infrared, 1, 1280, 720, rs.format.y8,
                             self.fps)
        config.enable_stream(rs.stream.infrared, 2, 1280, 720, rs.format.y8,
                             self.fps)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16,
                             self.fps)
        config.enable_stream(rs.stream.color, self.width, self.height,
                             rs.format.bgr8, self.fps)

        self.profile = self.pipeline.start(config)
        align_to = rs.stream.color
        self.align = rs.align(align_to)

        for i in range(45):
            self.getImage()
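
self.getImage() is called 45 times here, presumably to let auto-exposure settle; the method itself is not included in this snippet. A rough sketch, assuming numpy as np and that it returns aligned color and depth arrays like the other examples in this collection:

    def getImage(self):
        # Wait for a frameset, align depth to color, and return both as numpy arrays
        frames = self.pipeline.wait_for_frames()
        aligned = self.align.process(frames)
        depth_frame = aligned.get_depth_frame()
        color_frame = aligned.get_color_frame()
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        return color_image, depth_image
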
Example No. 7
def get_pointcloud(depth_image, color_frame, img, img_size):
    point_cloud = rs.pointcloud()
    points = rs.points()

    # Obtain point cloud data
    point_cloud.map_to(color_frame)
    points = point_cloud.calculate(depth_image)

    # Convert point cloud to 2d Array
    points3d = np.asanyarray(points.get_vertices())
    points3d = points3d.view(np.float32).reshape(points3d.shape + (-1, ))
    texture_coords = np.asanyarray(points.get_texture_coordinates())
    texture_coords = texture_coords.view(
        np.float32).reshape(texture_coords.shape + (-1, ))

    # Keep only points within a valid distance range (0.3 m to 10 m)
    long_distance_mask = points3d[:, 2] < 10
    short_distance_mask = points3d[:, 2] > 0.3
    distance_mask = np.logical_and(long_distance_mask, short_distance_mask)
    points3d = points3d[distance_mask]

    # Sample random points
    idx = np.random.randint(points3d.shape[0],
                            size=round(points3d.shape[0] / 100))
    sampled_points = points3d[idx, :]

    # Colours are not computed in this variant; return an empty array
    point_colors = np.array([])

    return sampled_points, point_colors
Example No. 8
def capture_RealSense(name):
    points = rs.points()
    global w, h

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame()

    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    points = pc.calculate(depth_frame)

    verts = np.array(points.get_vertices(2))
    """processes = []
    starttime = time.time()
    for i in verts:
        p = multiprocessing.Process(target=recup_data_cam, args=(i,))
        processes.append(p)
        p.start()

    for process in processes:
        process.join()"""
    starttime = time.time()
    pool = multiprocessing.Pool(12)
    image3d = pool.map(recup_data_cam, verts)
    pool.close()
    """for i in verts:
        for j in i:
            if 0.5 > j[0] > -0.5:
                if 0.5 > j[1] > -0.5:
                    if 0.5 > j[2] > -0.5:
                        taille = taille + 1"""
    print('That took {} seconds'.format(time.time() - starttime))

    starttime = time.time()
    image3d = sup_occurence(image3d)
    print('That took {} seconds'.format(time.time() - starttime))
    print(image3d)
    """for i in image3d:
        print("ici")
        for j in i:
            print(j)
            pass"""
    ttt = input("wait")
Example No. 9
def get_pointcloud(depth_image, color_frame, img, img_size):
    point_cloud = rs.pointcloud()
    points = rs.points()

    # Obtain point cloud data
    point_cloud.map_to(color_frame)
    points = point_cloud.calculate(depth_image)

    # Convert point cloud to 2d Array
    points3d = np.asanyarray(points.get_vertices())
    points3d = points3d.view(np.float32).reshape(points3d.shape + (-1, ))
    texture_coords = np.asanyarray(points.get_texture_coordinates())
    texture_coords = texture_coords.view(
        np.float32).reshape(texture_coords.shape + (-1, ))

    # Keep only points within a valid distance range (0.3 m to 10 m)
    long_distance_mask = points3d[:, 2] < 10
    short_distance_mask = points3d[:, 2] > 0.3
    distance_mask = np.logical_and(long_distance_mask, short_distance_mask)
    points3d = points3d[distance_mask]
    texture_coords = texture_coords[distance_mask]

    # Get colours
    u_coords = ((texture_coords[:, 0]) * img_size[0])
    u_coords = np.round(np.clip(u_coords, a_min=0, a_max=img_size[0] - 1))
    v_coords = ((texture_coords[:, 1]) * img_size[1])
    v_coords = np.round(np.clip(v_coords, a_min=0, a_max=img_size[1] - 1))
    uv_coords = np.vstack((u_coords, v_coords)).T.astype(np.uint16)

    # Sample random points
    idx = np.random.randint(points3d.shape[0],
                            size=round(points3d.shape[0] / 500))
    sampled_points = points3d[idx, :]
    uv_coords = uv_coords[idx, :]

    # Add extra column of 0's to 3d points
    o = np.ones((sampled_points.shape[0], 1))
    sampled_points = np.hstack((sampled_points, o))

    # Get colours of points
    point_colors = []
    for i, coord in enumerate(uv_coords):
        cols = img[coord[0], coord[1], :]
        point_colors.append(cols)

    point_colors = np.array(point_colors)

    return sampled_points, point_colors
Example No. 10
    def __init__(self, size=[640, 480]):
        self.pipe = rs.pipeline()

        config = rs.config()
        config.enable_stream(rs.stream.depth, size[0], size[1], rs.format.z16,
                             30)
        config.enable_stream(rs.stream.color, size[0], size[1], rs.format.bgr8,
                             30)

        # Start streaming
        self.profile = self.pipe.start(config)
        # Declare pointcloud object, for calculating pointclouds and texture mappings
        self.pc = rs.pointcloud()
        # We want the points object to be persistent so we can display the last cloud when a frame drops
        self.points = rs.points()
        self.init_sensor()
        print('Sensor Initialization Done!')
        self.extract_instrics()
        self.saliency_cut = SaliencyCut()
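
init_sensor(), extract_instrics() and SaliencyCut are defined elsewhere in the original project. As an illustration, the intrinsics extraction could look roughly like this (a sketch, not the original code):

    def extract_instrics(self):
        # Read the color stream intrinsics from the active pipeline profile
        color_profile = self.profile.get_stream(rs.stream.color).as_video_stream_profile()
        self.intrinsics = color_profile.get_intrinsics()
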
Example No. 11
def export_to_ply():
    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs2.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs2.points()

    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = rs2.pipeline()
    config = rs2.config()
    # Enable depth stream
    # config.enable_stream(rs2.stream.depth)
    config.enable_stream(rs2.stream.depth, 640, 480, rs2.format.z16, 30)
    config.enable_stream(rs2.stream.color, 640, 480, rs2.format.rgb8, 30)

    # Start streaming with chosen configuration
    pipe.start(config)

    # We'll use the colorizer to generate texture for our PLY
    # (alternatively, texture can be obtained from color or infrared stream)
    colorizer = rs2.colorizer()
    colorizer.set_option(rs2.option.color_scheme, 0)

    try:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        colorized = colorizer.process(frames)

        # Create save_to_ply object
        ply = rs2.save_to_ply("1.ply")

        # Set options to the desired values
        # In this example we'll generate a binary PLY without normals (the mesh is already created by default)
        ply.set_option(rs2.save_to_ply.option_ply_binary, True)
        ply.set_option(rs2.save_to_ply.option_ply_normals, False)

        print("Saving to 1.ply...")
        # Apply the processing block to the frameset which contains the depth frame and the texture
        ply.process(colorized)
        print("Done")
    finally:
        pipe.stop()
Example No. 12
def capture(isBack, pipelines, serial_numbers, isFirst, prefix, postfix):
    if isBack:
        ground = 'back'
    else:
        ground = 'fore'
        pc = rs.pointcloud()
        points = rs.points()

    for camNo, pipe in enumerate(pipelines):
        frames = pipe.wait_for_frames()

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        color_image = np.asanyarray(color_frame.get_data())

        if not isBack:
            pc.map_to(color_frame)
            points = pc.calculate(depth_frame)
            points.export_to_ply(
                prefix + str(serial_numbers[camNo]) + '_' + postfix + ground +
                '.ply', color_frame)

            tex_coor = np.asanyarray(points.get_texture_coordinates_EXT())
            imsave(
                prefix + str(serial_numbers[camNo]) + '_' + postfix +
                'texture_' + ground + '.tif', tex_coor)

            print(
                'Foreground pointcloud and texture coordinates saved for camera with serial number ',
                serial_numbers[camNo])

        imsave(
            prefix + str(serial_numbers[camNo]) + '_' + postfix + 'color_' +
            ground + '.tif', color_image)
        print(
            ground + 'ground color image saved for camera with serial number ',
            serial_numbers[camNo])

        if isFirst:
            input("Place balls in the box and press enter")
Example No. 13
    def __init__(self, width=1280, hight=720, fps=30):
        self.width = width
        self.hight = hight
        self.fps = fps

        self.points = rs.points()
        self.pipeline = rs.pipeline()
        config = rs.config()
        config.enable_stream(rs.stream.infrared, 1, self.width, self.hight,
                             rs.format.y8, self.fps)
        config.enable_stream(rs.stream.infrared, 2, self.width, self.hight,
                             rs.format.y8, self.fps)
        config.enable_stream(rs.stream.depth, self.width, self.hight,
                             rs.format.z16, self.fps)
        config.enable_stream(rs.stream.color, self.width, self.hight,
                             rs.format.bgr8, self.fps)

        profile = self.pipeline.start(config)
        align_to = rs.stream.color
        self.align = rs.align(align_to)
        time.sleep(1)
Example No. 14
def scangrid():

    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = pyrs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = pyrs.points()
    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = pyrs.pipeline()
    #Start streaming with default recommended configuration
    profile = pipe.start()
    # to figure out the depth scale, normally 0.001
    #depth_sensor = profile.get_device().first_depth_sensor()
    #print depth_sensor.get_depth_scale()

    try:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()
        # Fetch depth frames
        depth = frames.get_depth_frame()

        points = pc.calculate(depth)

        pts = np.asanyarray(points.get_vertices())

        # use: from scipy.interpolate import griddata
        # https://earthscience.stackexchage.com/questions/12057/how-to-interpolate-scattered-data-to-a-regular-grid-in-python/
        # use a mask to mask-out the outline..

        # values to return to rhino
        return pts

        # convert to numpy array
        #npy_vtx = np.zeros((len(vtx), 3), float)
        #for i in range(len(vtx)):
        #    npy_vtx[i][0] = np.float(vtx[i][0])
        #    npy_vtx[i][1] = np.float(vtx[i][1])
        #    npy_vtx[i][2] = np.float(vtx[i][2])

    finally:
        pipe.stop()
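
The comments above point at scipy.interpolate.griddata for resampling the scattered vertices onto a regular grid. A minimal sketch of that step (function name, variable names, and grid resolution are illustrative):

import numpy as np
from scipy.interpolate import griddata

def grid_points(pts, resolution=200):
    # Reinterpret the structured vertex buffer as an (N, 3) float32 array
    xyz = np.asanyarray(pts).view(np.float32).reshape(-1, 3)
    x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    xi = np.linspace(x.min(), x.max(), resolution)
    yi = np.linspace(y.min(), y.max(), resolution)
    xi, yi = np.meshgrid(xi, yi)
    # Linear interpolation; cells outside the convex hull become NaN and can be masked out
    zi = griddata((x, y), z, (xi, yi), method='linear')
    return xi, yi, zi
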
Example No. 15
def extract_frames(pipe,
                   cfg,
                   save_path,
                   post_processing=False,
                   save_colorize=True,
                   save_pc=False,
                   visualize=True):
    """Helper function to align and extract rgb-d image from bagfile.
       Check more saving options in arguments.
    Args:
        pipe: pyrealsense2 pipeline.
        cfg: pyrealsense2 pipeline configuration.
        save_path: Path to save the extracted frames.
        save_colorize: Save colorized depth maps visualization.
        save_pc: Save point cloud data.
        visualize: Visualize the video frames during saving.
    """
    # Configurations
    if save_colorize:
        colorizer = rs.colorizer()
    if save_pc:
        pc = rs.pointcloud()
        points = rs.points()
    # Save path
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # Start the pipe
    i = 0
    profile = pipe.start(cfg)
    device = profile.get_device()
    playback = device.as_playback()
    playback.set_real_time(
        False)  # Make sure this is False or frames get dropped
    while True:
        try:
            # Wait for a coherent pair of frames: (rgb, depth)
            pairs = pipe.wait_for_frames()

            # Align depth image to rgb image first
            align = rs.align(rs.stream.color)
            pairs = align.process(pairs)

            color_frame = pairs.get_color_frame()
            depth_frame = pairs.get_depth_frame()
            if not depth_frame or not color_frame:
                continue

            # Post-processing
            if post_processing:
                depth_frame = post_processing(depth_frame)

            # Get rgb-d images
            color_img = np.asanyarray(color_frame.get_data())
            color_img = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)
            depth_img = np.asanyarray(depth_frame.get_data())
            print('Frame {}, Depth Image {}, Color Image {}'.format(
                i + 1, depth_img.shape, color_img.shape))

            # Save in lossless formats
            cv2.imwrite(os.path.join(save_path, '{}_rgb.png'.format(i)),
                        color_img, [cv2.IMWRITE_PNG_COMPRESSION, 0])
            np.save(os.path.join(save_path, '{}_depth.npy'.format(i)),
                    depth_img)

            if save_colorize:
                # Save colorized depth map
                depth_img_colorized = np.asanyarray(
                    colorizer.colorize(depth_frame).get_data())
                cv2.imwrite(
                    os.path.join(save_path,
                                 '{}_depth_colorized.jpg'.format(i)),
                    depth_img_colorized)  # No need for lossless here

            if save_pc:
                # NOTE: Point cloud calculation takes longer time.
                pc.map_to(color_frame)
                points = pc.calculate(depth_frame)
                points.export_to_ply(
                    os.path.join(save_path, '{}_pc.ply'.format(i)),
                    color_frame)

            i += 1

            if visualize:
                # Stack both images vertically (requires save_colorize=True so depth_img_colorized exists)
                images = np.vstack((color_img, depth_img_colorized))

                # Show images
                cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)
                cv2.imshow('RealSense', images)
                cv2.waitKey(1)

        except Exception as e:
            print(e)
            break

    # Clean pipeline
    pipe.stop()
    print('{} frames saved in total.'.format(i))

    return
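
A hedged usage sketch for extract_frames(): the pipeline is fed from a recorded bag file (the file name below is illustrative).

pipe = rs.pipeline()
cfg = rs.config()
rs.config.enable_device_from_file(cfg, 'recording.bag', repeat_playback=False)
extract_frames(pipe, cfg, save_path='./frames', save_colorize=True, save_pc=False, visualize=False)
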
Example No. 16
def get_pointcloud_frame(bagfile_dir, filter = False):

    # -------------------------------------------- #
    # Read the bag file and save point cloud to .ply file
    # n_img: number of matrix you want to save
    # interval: number of frames between saved matrix
    # -------------------------------------------- #

    input = bagfile_dir

    try:
        # Declare pointcloud object, for calculating pointclouds and texture mappings
        pc = rs.pointcloud()

        # We want the points object to be persistent so we can display the last cloud when a frame drops
        points = rs.points()
        
        # Create pipeline
        pipeline = rs.pipeline()

        # Create a config object
        config = rs.config()
        
        # Tell config that we will use a recorded device from file, to be used by the pipeline through playback.
        rs.config.enable_device_from_file(config, input)
    
        # Configure the pipeline to stream the depth and color stream
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)

        # Declare filters
        dec_filter = rs.decimation_filter()   # Decimation - reduces depth frame density
        
        spat_filter = rs.spatial_filter()          # Spatial    - edge-preserving spatial smoothing
        
        temp_filter = rs.temporal_filter()    # Temporal   - reduces temporal noise

        fill_filter = rs.hole_filling_filter()  # Hole Filling filter

        disp_filter = rs.disparity_transform()  # Disparity Transform filter

        # Start streaming from file
        profile = pipeline.start(config)

        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: " , depth_scale)

        while(True):

            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()

            if (filter):
                # Perform filtering
                filtered = dec_filter.process(depth_frame)

                filtered = spat_filter.process(filtered)

                filtered = fill_filter.process(filtered)

                depth_frame = temp_filter.process(filtered)

                color_frame = dec_filter.process(color_frame)
            
            # Tell pointcloud object to map to this color frame
            pc.map_to(color_frame)

            # Generate the pointcloud and texture mappings
            points = pc.calculate(depth_frame)
            
            # yield
            points_array = np.asanyarray(points.get_vertices())
            
            yield np.asanyarray(depth_frame.get_data()), points_array
                
    finally:
        pass
    return
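
A possible consumer of the generator above ('recording.bag' is illustrative):

for depth_image, points_array in get_pointcloud_frame('recording.bag', filter=True):
    # Reinterpret the structured vertex buffer as an (N, 3) float32 array
    vertices = np.asanyarray(points_array).view(np.float32).reshape(-1, 3)
    print(depth_image.shape, vertices.shape)
    break  # stop after the first frame
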
Example No. 17
    def __init__(self, camera_configuration_file_path):
        self._cfg = yaml.load(open(camera_configuration_file_path, 'r'),
                              Loader=yaml.FullLoader)
        self.serial_number = self._cfg["serial_number"]

        self.points = rs.points()
        self.pipeline = rs.pipeline()
        config = rs.config()
        if self.serial_number != '' and self.serial_number != 'None':
            print('Config for realsense %s' % self.serial_number)
            config.enable_device(self.serial_number)

        # the resolution of color image can be higher than depth image
        if self._cfg["name"] == 'L515':
            config.enable_stream(rs.stream.infrared,
                                 self._cfg['depth_infrared']['width'],
                                 self._cfg['depth_infrared']['height'],
                                 rs.format.y8,
                                 self._cfg['depth_infrared']['fps'])
            config.enable_stream(rs.stream.depth,
                                 self._cfg['depth_infrared']['width'],
                                 self._cfg['depth_infrared']['height'],
                                 rs.format.z16,
                                 self._cfg['depth_infrared']['fps'])
            config.enable_stream(rs.stream.color, self._cfg['color']['width'],
                                 self._cfg['color']['height'], rs.format.bgr8,
                                 self._cfg['color']['fps'])
        else:
            config.enable_stream(rs.stream.infrared, 1,
                                 self._cfg['depth_infrared']['width'],
                                 self._cfg['depth_infrared']['height'],
                                 rs.format.y8,
                                 self._cfg['depth_infrared']['fps'])
            config.enable_stream(rs.stream.infrared, 2,
                                 self._cfg['depth_infrared']['width'],
                                 self._cfg['depth_infrared']['height'],
                                 rs.format.y8,
                                 self._cfg['depth_infrared']['fps'])
            config.enable_stream(rs.stream.depth,
                                 self._cfg['depth_infrared']['width'],
                                 self._cfg['depth_infrared']['height'],
                                 rs.format.z16,
                                 self._cfg['depth_infrared']['fps'])
            config.enable_stream(rs.stream.color, self._cfg['color']['width'],
                                 self._cfg['color']['height'], rs.format.bgr8,
                                 self._cfg['color']['fps'])

        self.profile = self.pipeline.start(config)

        if "exposure" in self._cfg['color'].keys(
        ) and self._cfg['color']['exposure'] != 'None':
            # set camera parameters
            for i in range(len(self.profile.get_device().sensors)):
                if self.profile.get_device().sensors[i].is_color_sensor():
                    # set color camera
                    color_sensor = self.profile.get_device().sensors[i]
                    color_sensor.set_option(rs.option.enable_auto_exposure, 0)
                    color_sensor.set_option(rs.option.exposure,
                                            self._cfg['color']['exposure'])
                    # color_sensor.set_option(rs.option.gain, 100)
                    print("camera exposure: ",
                          color_sensor.get_option(rs.option.exposure) * 0.1,
                          'ms')
        '''
        The align class used in librealsense demos maps between depth and some other stream and vice versa.
        RealSense does not offer other forms of stream alignment.
        https://github.com/IntelRealSense/librealsense/issues/1556
        '''
        self.basic_img = self._cfg["align_image"]
        if self._cfg["align_image"] == 'color':
            align_to = rs.stream.color
        elif self._cfg["align_image"] == 'depth':
            align_to = rs.stream.depth
        else:
            print("Error align image!")
            print("Align to color!")
            align_to = rs.stream.color

        self.align = rs.align(align_to)

        self.scale = self.get_depth_scale()
Example No. 18
def main():
    #initialize node
    rospy.init_node('image_save')
    rospy.Subscriber("chatter", Point, callback_pos)
    rospy.Subscriber("chatter_ori", Point, callback_ori)

    rospy.Subscriber("/camera/color/image_raw", Image, realsense_callback)
    rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image,
                     realsense_callback_depth)

    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()

    # Declare RealSense pipeline, encapsulating the actual device and sensors
    pipe = rs.pipeline()
    config = rs.config()

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    #Start streaming with default recommended configuration
    pipe.start(config)

    try:
        # Wait for the next set of frames from the camera
        frames = pipe.wait_for_frames()

        # unaligned
        #depth_frame = frames.get_depth_frame() is a 640x360 depth image
        #color_frame = frames.get_color_frame()

        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame(
        )  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        if not aligned_depth_frame or not color_frame:
            pass

        # Convert images to numpy arrays
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        points = pc.calculate(aligned_depth_frame)

        # Stack the color image next to the colorized depth map (raw 16-bit depth cannot be hstacked with 8-bit color)
        images = np.hstack((color_image, depth_colormap))
        cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Align Example', images)
        key = cv2.waitKey(1)
        # Press esc or 'q' to close the image window
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()

        #name = "1"
        #print("Saving to 1.ply...")
        #points.export_to_ply(name + ".ply", color_frame)
        #cv2.imwrite(name + ".png",depth_image)
        #cv2.imwrite(name + ".jpg",color_image)
        #print("Done")

    finally:
        pipe.stop()
        processFlag = False

    rospy.spin()
Example No. 19
def main():
    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()
    pipe = rs.pipeline()
    cfg = rs.config()
    rs.config.enable_device_from_file(cfg,
                                      './20191211_160154.bag',
                                      repeat_playback=False)
    cfg.enable_stream(rs.stream.color, 424, 240, rs.format.rgb8,
                      6)  # color camera
    cfg.enable_stream(rs.stream.depth, 424, 240, rs.format.z16,
                      6)  # depth camera
    # pipe.start(cfg)
    profile = pipe.start(cfg)
    playback = profile.get_device().as_playback()
    playback.set_real_time(False)
    # define Filters
    thres_filter = rs.threshold_filter()
    depth_to_disparity = rs.disparity_transform(True)
    spat_filter = rs.spatial_filter()
    temp_filter = rs.temporal_filter()
    # hole_fill_filter = rs.hole_filling_filter()
    disparity_to_depth = rs.disparity_transform(False)

    i = 0
    while True:
        try:
            frames = pipe.wait_for_frames()
        except:
            break
        print(i)
        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        # process depth image
        if True:
            depth_frame = thres_filter.process(depth_frame)
            depth_frame = depth_to_disparity.process(depth_frame)
            depth_frame = spat_filter.process(depth_frame)
            depth_frame = temp_filter.process(depth_frame)
            # depth_frame = hole_fill_filter.process(depth_frame)
            depth_frame = disparity_to_depth.process(depth_frame)

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        depth_colormap = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

        points = pc.calculate(depth_frame)
        cloud = points_to_pcl(points)
        cloud_normals, cluster_indices = region_growing_segmentation(cloud)

        idx = choose_flat_plane(cluster_indices, cloud_normals)
        chosen_plane_indices = cluster_indices[idx]
        arr = cloud.to_array()
        plane_points = arr[chosen_plane_indices]
        plane_coef = fit_plane(plane_points)
        plane_filtered_indices = find_close_points(cloud, plane_coef)
        processed = mark_pixels(depth_colormap, 424, plane_filtered_indices)

        # Dilation
        img_processed = image_post_process(processed)
        cv2.imwrite("./images/proc_" + str(i) + ".png", img_processed)
        i += 1
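
points_to_pcl(), region_growing_segmentation() and the other helpers are not shown here. Assuming python-pcl, the conversion from rs.points to a PCL cloud could look roughly like this sketch:

import numpy as np
import pcl  # python-pcl

def points_to_pcl(points):
    # Copy the vertex buffer into an (N, 3) float32 array and wrap it in a PCL cloud
    verts = np.asanyarray(points.get_vertices()).view(np.float32).reshape(-1, 3)
    cloud = pcl.PointCloud()
    cloud.from_array(np.ascontiguousarray(verts))
    return cloud
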
Example No. 20
# imports
import pyrealsense2 as rs2
import openmesh as om
from os import remove

# get the file name of the stl file (and make sure it ends with '.stl')
filename = input("Enter the file name of the stl file: ")
if filename[-4:] != ".stl":
    filename += ".stl"

print("Taking a depth frame")

# declare pointcloud object, for calculating pointclouds and texture mappings
pc: rs2.pointcloud = rs2.pointcloud()
# we want the points object to be persistent so we can display the last cloud when a frame drops
points: rs2.points = rs2.points()

# declare RealSense pipeline, encapsulating the actual device and sensors
pipe: rs2.pipeline = rs2.pipeline()
config: rs2.config = rs2.config()
# enable depth stream
config.enable_stream(rs2.stream.depth)

# start streaming with chosen configuration
pipe.start(config)

# we'll use the colorizer to generate texture for our PLY
# (alternatively, texture can be obtained from color or infrared stream)
colorizer: rs2.colorizer = rs2.colorizer()

# create a thresh filter with max distance of 0.7 meter
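
The example is cut off at this point; based on the comment, the next step would create a threshold filter limited to 0.7 m, roughly like this sketch (not the original lines):

threshold: rs2.threshold_filter = rs2.threshold_filter()
threshold.set_option(rs2.option.max_distance, 0.7)
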
Example No. 21
import pyrealsense2 as rs
import numpy as np

rs.pipeline()
rs.pipeline.try_wait_for_frames()
rs.pipeline.wait_for_frames()
rs.pipeline_profile()
rs.pipeline_profile.get_device()
rs.pipeline_profile.get_stream()
rs.pipeline_profile.get_streams()
rs.pipeline_wrapper()
rs.playback()
rs.playback_status()
rs.points()
rs.pose()
rs.pose_frame()
rs.pose_stream_profile()

rs.pipeline
rs.pipeline.get_active_profile()
rs.pipeline.poll_for_frames()
rs.pipeline.start()
rs.pipeline.stop()
rs.pipeline.wait_for_frames()
rs.pipeline.try_wait_for_frames()
pipe = rs.pipeline()

config = rs.config()
config.enable_stream
config.enable_all_streams
config.enable_device
Example No. 22
def run(dt):
    points = rs.points()
    global w, h
    window.set_caption("RealSense (%dx%d) %dFPS (%.2fms) %s" %
                       (w, h, 0 if dt == 0 else 1.0 / dt, dt * 1000,
                        "PAUSED" if state.paused else ""))

    if state.paused:
        return

    success, frames = pipeline.try_wait_for_frames(timeout_ms=0)
    if not success:
        return

    depth_frame = frames.get_depth_frame()
    other_frame = frames.first(other_stream)
    color = frames.get_color_frame()
    
    depth_frame = decimate.process(depth_frame)

    if state.postprocessing:
        for f in filters:
            depth_frame = f.process(depth_frame)

    # Grab new intrinsics (may be changed by decimation)
    depth_intrinsics = rs.video_stream_profile(
        depth_frame.profile).get_intrinsics()
    w, h = depth_intrinsics.width, depth_intrinsics.height

    color_image = np.asanyarray(other_frame.get_data())

    colorized_depth = colorizer.colorize(depth_frame)
    depth_colormap = np.asanyarray(colorized_depth.get_data())

    if state.color:
        mapped_frame, color_source = other_frame, color_image
    else:
        mapped_frame, color_source = colorized_depth, depth_colormap

    points = pc.calculate(depth_frame)

    pc.map_to(mapped_frame)

    # handle color source or size change
    fmt = convert_fmt(mapped_frame.profile.format())
    global image_data
    if (image_data.format, image_data.pitch) != (fmt, color_source.strides[0]):
        empty = (gl.GLubyte * (w * h * 3))()
        image_data = pyglet.image.ImageData(w, h, fmt, empty)
    # copy image data to pyglet
    image_data.set_data(fmt, color_source.strides[0], color_source.ctypes.data)

    verts = np.asarray(points.get_vertices(2)).reshape(h, w, 3)
    texcoords = np.asarray(points.get_texture_coordinates(2))

    if len(vertex_list.vertices) != verts.size:
        vertex_list.resize(verts.size // 3)
        # need to reassign after resizing
        vertex_list.vertices = verts.ravel()
        vertex_list.tex_coords = texcoords.ravel()

    copy(vertex_list.vertices, verts)
    copy(vertex_list.tex_coords, texcoords)
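
run() relies on module-level state from the pyglet point-cloud viewer sample it was adapted from (pipeline, pc, decimate, filters, colorizer, vertex_list, image_data, copy, ...). For reference, the convert_fmt() helper used above maps RealSense formats to pyglet format strings, roughly as in that sample (treat this as a sketch):

def convert_fmt(fmt):
    """rs.format to pyglet format string."""
    return {
        rs.format.rgb8: 'RGB',
        rs.format.bgr8: 'BGR',
        rs.format.rgba8: 'RGBA',
        rs.format.bgra8: 'BGRA',
        rs.format.y8: 'L',
    }[fmt]
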
Example No. 23
def get_frame():
    # Create a pipeline
    pipeline = rs.pipeline()
    print('pipeline created')
    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()

    print("Setting up config of running in live data")
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: " , depth_scale)

    # We will be removing the background of objects more than
    #  clipping_distance_in_meters meters away
    #clipping_distance_in_meters = 15 #10 meter
    #clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    while True:

        # Declare pointcloud object, for calculating pointclouds and texture mappings
        pc = rs.pointcloud()

        # We want the points object to be persistent so we can display the last cloud when a frame drops
        points = rs.points()

        # Get frameset of color and depth
        frames = pipeline.wait_for_frames()
        # frames.get_depth_frame() is a 640x480 depth image
        
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)
        
        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()
        
        # Validate that both frames are valid before using them
        if not aligned_depth_frame or not color_frame:
            continue

        # Tell pointcloud object to map to this color frame
        pc.map_to(color_frame)

        # Generate the pointcloud and texture mappings
        points = pc.calculate(aligned_depth_frame)

        points_array = np.asanyarray(points.get_vertices())

        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        
        # Remove background - Set pixels further than clipping_distance to grey
        grey_color = 153
        depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
        #bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
        
        # Render images
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        #images = np.hstack((bg_removed, depth_colormap))

        yield color_image, depth_image, points_array
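
A hypothetical consumer of the get_frame() generator above:

for color_image, depth_image, points_array in get_frame():
    cv2.imshow('color', color_image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
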
Example No. 24
def camera_setup():
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Append to syspath the path to openpose python build
    sys.path.append('/home/naoskin/openpose140/build/python')
    try:
        from openpose import *
    except:
        raise Exception(
            'Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?'
        )

    # Parameters for OpenPose.
    # Take a look at C++ OpenPose example for meaning of components.
    params = dict()
    params["logging_level"] = 4
    params["output_resolution"] = "-1x-1"
    params["net_resolution"] = "-1x480"
    params["model_pose"] = "COCO"
    params["alpha_pose"] = 0.6
    params["scale_gap"] = 0.3
    params["scale_number"] = 1
    params["render_threshold"] = 0.05
    params["render_pose"] = True
    params["num_gpu_start"] = 0
    params["disable_blending"] = False
    params["number_people_max"] = 1
    params["download_heatmaps"] = False
    params["heatmaps_add_parts"] = True
    params["heatmaps_add_PAFs"] = True
    # params["tracking"] = 5
    # params["number_people_max"] = 1
    # Ensure you point to the correct path where models are located
    params["default_model_folder"] = "/home/naoskin/openpose140/models/"
    params["display"] = 0
    # Construct OpenPose object allocates GPU memory
    openpose = OpenPose(params)

    depth_fps = 90

    # setup_advanced_camera()

    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()

    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    #  different resolutions of color and depth streams
    config = rs.config()
    # This is the minimal recommended resolution
    config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 90)
    config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    # align_to = rs.stream.depth
    align_to = rs.stream.color

    align = rs.align(align_to)

    print("Camera setup ready.")

    return align, depth_scale, openpose, pc, points, pipeline, profile
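
A possible call site for camera_setup(), unpacking the handles it returns:

align, depth_scale, openpose, pc, points, pipeline, profile = camera_setup()
frames = pipeline.wait_for_frames()
aligned = align.process(frames)
color_frame = aligned.get_color_frame()
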
Example No. 25
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.

#####################################################
##                  Export to PLY                  ##
#####################################################

# First import the library
import pyrealsense2 as rs


# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()

# Declare RealSense pipeline, encapsulating the actual device and sensors
pipe = rs.pipeline()
#Start streaming with default recommended configuration
pipe.start()

try:
    # Wait for the next set of frames from the camera
    frames = pipe.wait_for_frames()

    # Fetch color and depth frames
    depth = frames.get_depth_frame()
    color = frames.get_color_frame()

    # Tell pointcloud object to map to this color frame
    pc.map_to(color)
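    # The snippet above is truncated here; in the matching Intel export-to-PLY
    # sample (compare Example No. 11) it continues roughly as follows -- a
    # reconstruction, not the original lines:
    points = pc.calculate(depth)
    print("Saving to 1.ply...")
    points.export_to_ply("1.ply", color)
    print("Done")
finally:
    pipe.stop()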
Example No. 26
def getDepthFrames(nImages=10,
                   resolution=(1280, 720),
                   fps=30,
                   sleepTime=5,
                   laser=False,
                   outputFolder='./'):
    if nImages < 1:
        raise ValueError('nImages must be at least 1')

    if sleepTime < 0:
        raise ValueError('sleepTime must be non-negative')

    if not os.path.isdir(outputFolder):
        os.makedirs(outputFolder)

    pointcloud = rs.pointcloud()  # type: rs.pointcloud
    points = rs.points()  # type: rs.points

    config = rs.config()  # type: rs.config
    config.enable_stream(rs.stream.depth, resolution[0], resolution[1],
                         rs.format.z16, fps)
    config.enable_stream(rs.stream.color, resolution[0], resolution[1],
                         rs.format.rgb8, fps)

    align = rs.align(rs.stream.depth)  # type: rs.align

    pipeline = rs.pipeline()  # type: rs.pipeline
    profile = pipeline.start(config)  # type: rs.pipeline_profile
    device = profile.get_device()  # type: rs.device
    depthSensor = device.first_depth_sensor()  # type: rs.depth_sensor

    if depthSensor.supports(rs.option.emitter_enabled):
        depthSensor.set_option(rs.option.emitter_enabled,
                               1.0 if laser else 0.0)
    elif laser:
        raise EnvironmentError('Device does not support laser')

    np.savez(
        os.path.join(outputFolder, 'depth.scale.npz'),
        depthscale=depthSensor.get_depth_scale(),
    )

    counter = 0

    print('Writing depth intrinsics')
    print('')
    while True:
        frames = pipeline.wait_for_frames()  # type: rs.frameset
        alignedFrames = align.process(frames)  # type: rs.frameset
        depth = alignedFrames.get_depth_frame()  # type: rs.depth_frame

        if depth:
            depthIntrinsics = intrinsics_to_dict(
                depth.get_profile().as_video_stream_profile().get_intrinsics())

            with open(os.path.join(outputFolder, 'depth.intrinsics.pkl'),
                      'wb') as f:
                pickle.dump(depthIntrinsics, f)

            break

    while True:
        print('Waiting for frame {:d}'.format(counter))
        frames = pipeline.wait_for_frames()  # type: rs.frameset
        alignedFrames = align.process(frames)  # type: rs.frameset
        depth = alignedFrames.get_depth_frame()  # type: rs.depth_frame
        color = alignedFrames.get_color_frame()  # type: rs.video_frame

        if not depth:
            print('No depth data, retry...')
            continue

        if not color:
            print('No color data, retry...')
            continue

        print('Depth data available')
        depthImage = np.asanyarray(depth.get_data())

        print('Writing raw numpy file')
        np.savez(os.path.join(outputFolder,
                              '{:d}.depth.image.npz'.format(counter)),
                 depthImage=depthImage)

        print('Generating point cloud')
        pointcloud.map_to(color)
        points = pointcloud.calculate(depth)  # type: rs.points

        print('Writing point cloud to PLY file')
        points.export_to_ply(
            os.path.join(outputFolder, '{:d}.pointcloud.ply'.format(counter)),
            color)

        counter = counter + 1

        if counter >= nImages:
            break

        print('Sleeping before next frame')
        print('')
        time.sleep(sleepTime)

    pipeline.stop()
Example No. 27
def main():
    # ROS node initialization
    rospy.init_node('perception', anonymous=True)
    moveit_commander.roscpp_initialize(sys.argv)
    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    group = moveit_commander.MoveGroupCommander("manipulator")
    group.set_planner_id('RRTConnectkConfigDefault')
    group.set_num_planning_attempts(5)
    group.set_planning_time(5)
    group.set_max_velocity_scaling_factor(0.5)

    # get the transformation between robot base and camera
    tfBuffer = tf2_ros.Buffer(rospy.Duration(1200.0))
    listener = tf2_ros.TransformListener(tfBuffer)
    try:
        trans = tfBuffer.lookup_transform('world', 'camera_color_optical_frame', rospy.Time(0), rospy.Duration(1.0))
    except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
        print "Find transform failed"

    # define and go to the home pose waiting for picking instruction
    home_joint_position = [-80.25/180*3.14, 2.0/180*3.14, 125.8/180*3.14, 34.1/180*3.14, 96.1/180*3.14, 12.7/180*3.14]
    group.set_joint_value_target(home_joint_position)
    plan = group.plan()
    # raw_input('Press enter to go the home position: ')
    group.execute(plan)
    time.sleep(1)

    # initiate game
    game = TicTacToe()

    # initiate realsense
    points = rs.points()
    pipeline= rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    profile = pipeline.start(config)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    align_to = rs.stream.color
    align = rs.align(align_to)

    # capture the initial image,
    time.sleep(3)
    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    aligned_depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    depth_image_background = np.asanyarray(aligned_depth_frame.get_data())
    color_image_background = np.asanyarray(color_frame.get_data())

    # TODO: detect the tic tac toe game board, compute all the 9 positions of placing X and O


    # TODO: define a function to detect the human's move from color image
    def detect_person_move(color_image, game):
        pass

    # start the game
    game.init_game()

    while len(game.empty_cells(game.board)) > 0 and game.gameOver() == False:
        # TODO: Ask the human to place her piece X, detect the move and update the state of the game board
        if game.first == 'N':
            game.ai_turn(game.c_choice, game.h_choice)
            game.first = ''

        game.human_turn(game.c_choice, game.h_choice, detector)
        game.ai_turn(game.c_choice, game.h_choice)

        if game.gameOver() == True:
            break

        # TODO: calculate robot's move and execute it


    print("Game Over. " + game.whoWon() + " Wins")
Example No. 28
def main():
    # Create object for parsing command-line options
    parser = argparse.ArgumentParser(description="Read recorded bag file and display depth stream in jet colormap.\
                                    Remember to change the stream resolution, fps and format to match the recorded.")
    # Add argument which takes path to a bag file as an input
    parser.add_argument("-b", "--bag", type=str, help="Path to the bag file", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/A_9.bag")
    parser.add_argument("-d", "--depth_map_path", type=str, help="Path to the output depth map h5 file", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_depth_map.h5")
    parser.add_argument("-df", "--depth_folder", type=str, help="Path to the output depth image folder", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_depth_npy/")
    parser.add_argument("-p", "--point_cloud_path", type=str, help="Path to the output point cloud h5 file", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_point_cloud.h5")
    parser.add_argument("-pf", "--point_cloud_folder", type=str, help="Path to the output point cloud h5 file", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_point_clouds/")
    parser.add_argument("-if", "--image_folder", type=str, help="Path to the output image folder", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_test_images/")
    parser.add_argument("-dif", "--depth_image_folder", type=str, help="Path to the output image folder", default="/home/cel/Umich/EECS545/FinalProject/data/RS/A/rs_a_9_depth_images/")
    # Parse the command line arguments to an object
    args = parser.parse_args()
    # Safety check if no parameter has been given
    if not args.bag:
        print("No input parameter has been given.")
        print("For help type --help")
        exit()
    # Check if the given file have bag extension
    if os.path.splitext(args.bag)[1] != ".bag":
        print("The given file is not of correct file format.")
        print("Only .bag files are accepted")
        exit()

    # remove existing hdf5 file
    try:
        os.remove(args.depth_map_path)
        os.remove(args.point_cloud_path)
    except:
        print('no previous h5 file')
    
    # create paths for saving images
    try:
        os.mkdir(args.image_folder)
    except:
        print('image folder already exists')
    try:
        os.mkdir(args.depth_folder)
    except:
        print('depth folder already exists')
    try:
        os.mkdir(args.point_cloud_folder)
    except:
        print('point cloud folder already exists')
    try:
        os.mkdir(args.depth_image_folder)
    except:
        print('depth image folder already exists')

    W = 424
    H = 240

    # write our own h5 file from realsense bag
    f_rs_depth = h5py.File(args.depth_map_path, "a")
    depth_map_id = f_rs_depth.create_dataset('id', (1,), maxshape=(None,), dtype='uint8', chunks=(1,))
    depth_map_from_bag = f_rs_depth.create_dataset('data', (1,H,W), maxshape=(None,H,W), dtype='float16', chunks=(1,H,W))
    depth_map_id[0] = 0
    depth_map_from_bag[0, :, :] = np.zeros((H, W))
    
    f_rs_point_cloud = h5py.File(args.point_cloud_path, "a")
    pcd_id = f_rs_point_cloud.create_dataset('id', (1,), maxshape=(None,), dtype='uint8', chunks=(1,))
    pcd_from_bag = f_rs_point_cloud.create_dataset('data', (1,W*H,3), maxshape=(None,W*H,3), dtype='float16', chunks=(1,W*H,3))
    pcd_id[0] = 0
    pcd_from_bag[0, :, :] = np.zeros((W*H, 3))

    # initialize
    index_list = []

    try:
        # Create pipeline
        pipeline = rs.pipeline()

        # Create a config object
        config = rs.config()

        # Tell config that we will use a recorded device from file, to be used by the pipeline through playback.
        rs.config.enable_device_from_file(config, args.bag, repeat_playback=False)

        # Configure the pipeline to stream the depth stream & color stream
        config.enable_stream(rs.stream.depth, W, H, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, W, H, rs.format.rgb8, 30)

        # Start streaming from file
        pipeline.start(config)
               
        index = 0

        # Create colorizer object
        colorizer = rs.colorizer()

        # Streaming loop
        while True:
            # Get frameset of depth
            frames = pipeline.wait_for_frames()

            # Get depth frame
            depth_frame = frames.get_depth_frame()

            # Get color frame
            color_frame = frames.get_color_frame()

            # save to list
            index_list.append(index)

            # save depth image
            depth_image = np.asanyarray(depth_frame.get_data()) / 1000
            np.save(args.depth_folder+str(index)+".npy", depth_image)

            # Colorize depth frame to jet colormap
            depth_color_frame = colorizer.colorize(depth_frame)

            # Convert depth_frame to numpy array to render image in opencv
            depth_color_image = np.asanyarray(depth_color_frame.get_data())
            depth_color_image_flipped = cv2.flip(depth_color_image, 1)
            cv2.imwrite(args.depth_image_folder+str(index)+".png", depth_color_image_flipped)

            # save color image
            color_image = np.asanyarray(color_frame.get_data())
            color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
            cv2.imwrite(args.image_folder+str(index)+".png", color_image)

            # save point cloud (the vertex buffer is packed as float32 triplets)
            pc = rs.pointcloud()
            points = rs.points()
            pc.map_to(color_frame)
            points = pc.calculate(depth_frame)
            coordinates = np.ndarray(buffer=points.get_vertices(), dtype=np.float32, shape=(W*H, 3))
            np.save(args.point_cloud_folder+str(index)+".npy", coordinates)

            index += 1

    except RuntimeError:
        print("No more frames arrived, reached end of BAG file!")

    finally:
        print('total', len(index_list), 'frames read')

        for index in index_list:
            depth_npy = np.load(args.depth_folder+str(index)+".npy")
            point_cloud_npy = np.load(args.point_cloud_folder+str(index)+".npy")

            # save depth map
            if index == 0:
                # depth
                depth_map_from_bag[-1:,:,:] = depth_npy
                # point cloud
                pcd_from_bag[-1:,:,:] = point_cloud_npy
            else:
                # depth
                depth_map_id.resize(depth_map_id.shape[0]+1, axis=0)   
                depth_map_id[-1:] = index
                depth_map_from_bag.resize((depth_map_from_bag.shape[0]+1, depth_map_from_bag.shape[1], depth_map_from_bag.shape[2]))   
                depth_map_from_bag[-1:,:,:] = depth_npy
                # point cloud
                pcd_id.resize(pcd_id.shape[0]+1, axis=0)   
                pcd_id[-1:] = index
                pcd_from_bag.resize((pcd_from_bag.shape[0]+1, pcd_from_bag.shape[1], pcd_from_bag.shape[2]))   
                pcd_from_bag[-1:,:,:] = point_cloud_npy

        print('depth map h5 file saved:', depth_map_from_bag.shape)
        print('point cloud h5 file saved:', pcd_from_bag.shape)
        
        f_rs_depth.close()
        f_rs_point_cloud.close()
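The two HDF5 files above grow one row per frame via resize(). A minimal read-back sketch (the file names are hypothetical stand-ins for args.depth_map_path and args.point_cloud_path used during capture):

import h5py

with h5py.File("depth_maps.h5", "r") as f:
    frame_ids = f["id"][:]       # (N,) frame indices
    depth_maps = f["data"][:]    # (N, H, W) float16 depth maps in meters
with h5py.File("point_clouds.h5", "r") as f:
    clouds = f["data"][:]        # (N, W*H, 3) float16 vertices
print(frame_ids.shape, depth_maps.shape, clouds.shape)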
    def save_pointcloud_to_ply(self,
                               bagfile_dir,
                               n_img=50,
                               interval=1,
                               filter=False):

        # -------------------------------------------- #
        # Read the bag file and save point clouds to .ply files
        # n_img: number of point clouds to save
        # interval: number of frames between saved point clouds
        # -------------------------------------------- #

        import pyrealsense2 as rs

        try:
            # Declare pointcloud object, for calculating pointclouds and texture mappings
            pc = rs.pointcloud()

            # We want the points object to be persistent so we can display the last cloud when a frame drops
            points = rs.points()

            # Create pipeline
            pipeline = rs.pipeline()

            # Create a config object
            config = rs.config()

            # Tell config that we will use a recorded device from file, to be played back through the pipeline.
            rs.config.enable_device_from_file(config, bagfile_dir)

            # Configure the pipeline to stream the depth and color stream
            #config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
            #config.enable_stream(rs.stream.color, 1280, 720, rs.format.rgb8, 30)
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)

            # Start streaming from file
            profile = pipeline.start(config)

            # Declare filters
            #dec_filter = rs.decimation_filter()   # Decimation - reduces depth frame density

            spat_filter = rs.spatial_filter()       # Spatial    - edge-preserving spatial smoothing

            temp_filter = rs.temporal_filter()      # Temporal   - reduces temporal noise

            fill_filter = rs.hole_filling_filter()  # Hole Filling filter

            disp_filter = rs.disparity_transform()  # Disparity Transform filter

            depth_sensor = profile.get_device().first_depth_sensor()
            depth_scale = depth_sensor.get_depth_scale()
            print("Depth Scale is: ", depth_scale)

            count = 0

            for i in range((n_img - 1) * interval + 1):

                if (i % interval != 0):

                    # Get frameset of depth
                    frames = pipeline.wait_for_frames()

                    continue

                else:

                    # Get frameset of depth
                    frames = pipeline.wait_for_frames()

                    # Get depth frame
                    depth_frame = frames.get_depth_frame()
                    color_frame = frames.get_color_frame()

                    if (filter):
                        # Perform filtering
                        #filtered = dec_filter.process(depth_frame)

                        filtered = spat_filter.process(depth_frame)

                        #filtered = fill_filter.process(filtered)

                        depth_frame = temp_filter.process(filtered)

                        #color_frame = dec_filter.process(color_frame)

                    # Tell pointcloud object to map to this color frame
                    pc.map_to(color_frame)

                    # Generate the pointcloud and texture mappings
                    points = pc.calculate(depth_frame)

                    # generate filename
                    filename = self.save_dir + "pc" + "%04d" % count + ".ply"

                    # Save raw data into numpy matrix file
                    points.export_to_ply(filename, color_frame)

                    count += 1

                    print("save %d th point cloud from %d th frame to" %
                          (count, i) + filename)
        finally:
            # Stop streaming so the bag file / device is released
            pipeline.stop()
        return
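As a quick sanity check on the exported files, one of the .ply clouds can be read back, for example with open3d (a separate dependency, not used by the method above; the file name below is hypothetical, following the self.save_dir + "pc%04d.ply" pattern):

import open3d as o3d

pcd = o3d.io.read_point_cloud("pc0000.ply")   # load one exported cloud
print(pcd)                                    # prints the point count
o3d.visualization.draw_geometries([pcd])      # interactive viewer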
Exemplo n.º 30
0
uStep = MICRO_STEPS.ustep_8
mechGain = MECH_GAIN.ballscrew_10mm_turn
mm.configAxis(axis, uStep, mechGain)
print("Axis " + str(axis) + " configured with " + str(uStep) +
      " microstepping and " + str(mechGain) + "mm/turn mechanical gain")

# Configure the axis direction
ax_direction = DIRECTION.POSITIVE
mm.configAxisDirection(axis, ax_direction)
print("Axis direction set to " + str(ax_direction))

# New variable declarations
# Declare pointcloud object, for calculating pointclouds and texture mappings
pc = rs.pointcloud()
# We want the points object to be persistent so we can display the last cloud when a frame drops
points = rs.points()

### RealSense initialization ###

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1920, 1080, rs.format.rgb8, 30)
print('RealSense depth and RGB streams initialized')

# Home mm at the beginning of each new rep and treatment

if mm.getCurrentPositions()[2] != 0.0:
    mm.configHomingSpeed(axis, 5)
    mm.emitHome(axis)
    print('mm is going home')
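The excerpt above configures the depth and color streams but stops before streaming begins. A hedged continuation, following the same pattern as the other examples in this collection (the output file name is hypothetical), would start the pipeline and export one textured point cloud:

profile = pipeline.start(config)
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
pc.map_to(color_frame)
points = pc.calculate(depth_frame)
points.export_to_ply("scan_0000.ply", color_frame)
pipeline.stop()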
Exemplo n.º 31
0
def detect(save_img=False):
    # Declare pointcloud object, for calculating pointclouds and texture mappings
    pc = rs.pointcloud()
    # We want the points object to be persistent so we can display the last cloud when a frame drops
    points = rs.points()

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)

    # Start streaming
    pipe_profile = pipeline.start(config)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    frames = pipeline.wait_for_frames()
    aligned_frames = align.process(frames)
    depth_frame = aligned_frames.get_depth_frame()
    color_frame = aligned_frames.get_color_frame()
    #time.sleep(8)
    img_color = np.array(color_frame.get_data())
    img_depth = np.array(depth_frame.get_data())
    cv2.imwrite('/home/xcy/workspace/read.jpg', img_color)

    #detection section
    img_size = (
        320, 192
    ) if ONNX_EXPORT else opt.img_size  # (320, 192) or (416, 256) or (608, 352) for (height, width)
    out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
    webcam = source == '0' or source.startswith('rtsp') or source.startswith(
        'http') or source.endswith('.txt')

    # Initialize
    device = torch_utils.select_device(
        device='cpu' if ONNX_EXPORT else opt.device)
    if os.path.exists(out):
        shutil.rmtree(out)  # delete output folder
    os.makedirs(out)  # make new output folder

    # Initialize model
    model = Darknet(opt.cfg, img_size)

    # Load weights
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        model.load_state_dict(
            torch.load(weights, map_location=device)['model'])
    else:  # darknet format
        load_darknet_weights(model, weights)

    # Second-stage classifier
    classify = False
    if classify:
        modelc = torch_utils.load_classifier(name='resnet101',
                                             n=2)  # initialize
        modelc.load_state_dict(
            torch.load('weights/resnet101.pt',
                       map_location=device)['model'])  # load weights
        modelc.to(device).eval()

    # Eval mode
    model.to(device).eval()

    # Fuse Conv2d + BatchNorm2d layers
    # model.fuse()

    # Export mode
    if ONNX_EXPORT:
        model.fuse()
        img = torch.zeros((1, 3) + img_size)  # (1, 3, 320, 192)
        f = opt.weights.replace(opt.weights.split('.')[-1],
                                'onnx')  # *.onnx filename
        torch.onnx.export(model, img, f, verbose=False, opset_version=11)

        # Validate exported model
        import onnx
        model = onnx.load(f)  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        print(onnx.helper.printable_graph(
            model.graph))  # Print a human readable representation of the graph
        return

    # Half precision
    half = half and device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = True
        torch.backends.cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=img_size)
    else:
        save_img = True
        dataset = LoadImages(source, img_size=img_size)

    # Get names and colors
    names = load_classes(opt.names)
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(names))]

    # Run inference
    t0 = time.time()
    _ = model(torch.zeros(
        (1, 3, img_size, img_size),
        device=device)) if device.type != 'cpu' else None  # run once
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        t1 = torch_utils.time_synchronized()
        pred = model(img, augment=opt.augment)[0]
        t2 = torch_utils.time_synchronized()

        # to float
        if half:
            pred = pred.float()

        # Apply NMS
        pred = non_max_suppression(pred,
                                   opt.conf_thres,
                                   opt.iou_thres,
                                   multi_label=False,
                                   classes=opt.classes,
                                   agnostic=opt.agnostic_nms)

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0 = path[i], '%g: ' % i, im0s[i]
            else:
                p, s, im0 = path, '', im0s

            save_path = str(Path(out) / Path(p).name)
            s += '%gx%g ' % img.shape[2:]  # print string
            if det is not None and len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                          im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, names[int(c)])  # add to string
                stop_num = 0
                #print('c = ',len(det))

                # Write results
                for *xyxy, conf, cls in det:
                    c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]),
                                                            int(xyxy[3]))
                    c0 = ((int(xyxy[0]) + int(xyxy[2])) / 2,
                          (int(xyxy[1]) + int(xyxy[3])) / 2)
                    if save_txt:  # Write to file
                        with open(save_path + '.txt', 'a') as file:
                            file.write(('%g ' * 6 + '\n') % (*xyxy, cls, conf))

                    if save_img or view_img:  # Add bbox to image
                        label = '%s %.2f' % (names[int(cls)], conf)
                        plot_one_box(xyxy,
                                     im0,
                                     label=label,
                                     color=colors[int(cls)])

                    #print(c0)

                    if int(cls) == opt.targetclass:
                        x_center = int((int(xyxy[0]) + int(xyxy[2])) / 2)
                        y_center = int((int(xyxy[1]) + int(xyxy[3])) / 2)

                        # Intrinsics and Extrinsics
                        depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
                        color_intrin = color_frame.profile.as_video_stream_profile().intrinsics
                        depth_to_color_extrin = depth_frame.profile.get_extrinsics_to(color_frame.profile)

                        # Depth scale: units of the values inside a depth frame, how to convert the value to units of 1 meter
                        depth_sensor = pipe_profile.get_device().first_depth_sensor()
                        depth_scale = depth_sensor.get_depth_scale()

                        # Map depth to color
                        depth_pixel = [240, 320]  # Random pixel
                        depth_point = rs.rs2_deproject_pixel_to_point(
                            depth_intrin, depth_pixel, depth_scale)

                        color_point = rs.rs2_transform_point_to_point(
                            depth_to_color_extrin, depth_point)
                        color_pixel = rs.rs2_project_point_to_pixel(
                            color_intrin, color_point)

                        pc.map_to(color_frame)
                        points = pc.calculate(depth_frame)
                        vtx = np.array(points.get_vertices())
                        tex = np.array(points.get_texture_coordinates())
                        pix_num = 1280 * y_center + x_center
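                        # Row-major index into the flat (720*1280,) vertex/texture arrays;
                        # valid because align.process() above maps the depth frame onto the
                        # 1280x720 color frame's pixel grid.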

                        point_cloud_value = [
                            float(vtx[pix_num][0]),
                            float(vtx[pix_num][1]),
                            float(vtx[pix_num][2])
                        ]
                        print('point_cloud_value:', point_cloud_value,
                              names[int(cls)], int(cls))
                        #if not np.isnan(np.float(vtx[pix_num][0])) and not np.isinf(np.float(vtx[pix_num][0])):
                        #    os.system('rostopic pub -1 /goal_pose geometry_msgs/PointStamped [0,[0,0],zed_left_camera_optical_frame] [%s,%s,%s]' %(np.float(vtx[pix_num][0]),np.float(vtx[pix_num][1]),np.float(vtx[pix_num][2]))
                        #os.system('rostopic pub -1 /start_plan std_msgs/Bool 1')
                        #else:
                        #os.system('rostopic pub -1 /start_plan std_msgs/Bool 0')
                        #print("Can't estimate point cloud at this position.")
                        break  #only the target class
                    #stop_num += 1
                    #if stop_num >= len(det):
                    #    os.system('rostopic pub -1 /start_plan std_msgs/Bool 0')

            # Print time (inference + NMS)
            print('%sDone. (%.3fs)' % (s, t2 - t1))

            # Stream results
            if view_img:
                cv2.imshow(p, im0)
                if cv2.waitKey(1) == ord('q'):  # q to quit
                    raise StopIteration

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'images':
                    cv2.imwrite(save_path, im0)
                else:
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer

                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        vid_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*opt.fourcc),
                            fps, (w, h))
                    vid_writer.write(im0)

    if save_txt or save_img:
        print('Results saved to %s' % os.getcwd() + os.sep + out)
        if platform == 'darwin':  # MacOS
            os.system('open ' + out + ' ' + save_path)

    print('Done. (%.3fs)' % (time.time() - t0))

    # Close the camera
    pipeline.stop()
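Instead of indexing the full vertex array, the 3D position of the detection's center pixel can also be recovered straight from the aligned depth frame. A minimal sketch, assuming the x_center, y_center, depth_frame and depth_intrin values computed inside detect() above:

import pyrealsense2 as rs

def deproject_center(depth_frame, depth_intrin, x_center, y_center):
    # Distance at the pixel, already converted to meters by the SDK
    depth_m = depth_frame.get_distance(x_center, y_center)
    # Back-project the pixel into camera coordinates (X, Y, Z), in meters
    return rs.rs2_deproject_pixel_to_point(depth_intrin, [x_center, y_center], depth_m)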