Example #1
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                                            sensor, res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; data will be written to this file.
    @param angle_thresh: The angle threshold in radians used to sparsify the views.
    '''
    graph = []
    
    # Create a ROS node and subscribe to the OpenNI topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    # openni & freenect require '/camera/rgb/image_rect_color'; openni2 just needs '/camera/rgb/image_raw'
    if sensor in ['kinect']:
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_color')
    elif sensor in ['xtion']:
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    else:
        raise ValueError("Unsupported sensor '%s': expected 'kinect' or 'xtion'" % sensor)
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
    # Convert the ROS topics to cv types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
    """            
    rgb_display = highgui.imshow(name='rgb')
    depth_display = highgui.imshow(name='depth')
    graph += [image2cv[:] >> rgb_display[:],
              depth2cv[:] >> depth_display[:],
            ]
            
    #   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ msg2cloud[:] >> viewer[:] ]
    """   
    # Process the point cloud

    # Crop the cloud with pass-through filters on x, y and z
    
    cut_x = PassThrough(filter_field_name="x", 
                        filter_limit_min=-0.2,
                        filter_limit_max=0.2)
                        
    cut_y = PassThrough(filter_field_name="y", 
                        filter_limit_min=-0.5,
                        filter_limit_max=0.5)
    cut_z = PassThrough(filter_field_name="z",    
                        filter_limit_min=-0.0,
                        filter_limit_max=1.0)
                        
    graph += [ msg2cloud[:] >> cut_x[:],
               cut_x[:] >> cut_y[:],
               cut_y[:] >> cut_z[:] ]
               
    # Voxel filter
    
    voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.002)
    
    graph += [ cut_z[:] >> voxel_grid[:] ]
               
    # Find plane and delete it
    
    normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
    
    graph += [ voxel_grid[:] >> normals[:] ]
    
    plane_finder = SACSegmentationFromNormals("planar_segmentation",
                                                 model_type=SACMODEL_NORMAL_PLANE,
                                                 eps_angle=0.09, distance_threshold=0.1)
    
    plane_deleter = ExtractIndices(negative=True)
    
    graph += [ voxel_grid[:] >> plane_finder['input'],
               normals[:] >> plane_finder['normals'] ]
    graph += [ voxel_grid[:]  >> plane_deleter['input'],
               plane_finder['inliers'] >> plane_deleter['indices'] ]
    # Create a point cloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ plane_deleter[:] >> viewer[:] ]
    
    # Compute the VFH signature
    
    vfh_signature = VFHEstimation(radius_search=0.01)
    
    graph += [ plane_deleter[:] >>  vfh_signature['input'],
               normals[:] >> vfh_signature['normals'] ]                                                          
    
    # convert the image to grayscale                                                                      
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [image2cv[:]  >> rgb2gray[:] ]
                
    # get a pose using the dot pattern: there may be a scale ambiguity as this is 3D only
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               rgb2gray[:] >> poser['image'] ]  # poser gives us R & T from the camera to the dot pattern
    
    
    points3d = calib.DepthTo3d()
    
    graph += [ depth_info2cv['K'] >> points3d['K'],
               depth2cv['image'] >> points3d['depth']
             ]
    
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    
    graph += [ # find the normals
                depth_info2cv['K'] >> compute_normals['K'],
                points3d['points3d'] >> compute_normals['points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                depth_info2cv['K'] >> plane_est['K'],
                points3d['points3d'] >> plane_est['points3d'] ]
    

    
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ depth_info2cv['K'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]
               
    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))
                                                          
    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]
    
    trainer_ = Trainer()
    
    graph += [ pose_filter['R', 'T'] >> trainer_['R', 'T'],
               vfh_signature['output'] >> trainer_['features'],
               delta_pose['last'] >> trainer_['last'],
               delta_pose['novel'] >> trainer_['novel'],
               plane_deleter[:] >> trainer_['input'] ]
 
    novel = delta_pose['novel']

    cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

    graph += [ plane_deleter[:] >> cloud2msg[:] ]   
    
    baggers = dict(points=PointCloudBagger(topic_name='/camera/depth_registered/points'),
                   pose=PoseBagger(topic_name='/camera/pose')
                   )
                   
    bagwriter = ecto.If('Bag Writer if R|T', cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
    pcd_writer = ecto.If('msd pcd writer', cell=PCDWriter())
    
    graph += [    cloud2msg[:] >> bagwriter['points'],
                  poseMsg['pose'] >> bagwriter['pose']
                  ]
    graph += [ plane_deleter['output'] >> pcd_writer['input'] ]
    
    delta_pose.inputs.__test__ = True
    pcd_writer.inputs.__test__ = True
    
    graph += [novel >> (bagwriter['__test__'])]  
    graph += [novel >> (pcd_writer['__test__'])]
    
    rt2pose = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    rt2pose_filtered = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    posePub = PosePublisher("pose_pub", topic_name='pattern_pose')
    posePub_filtered = PosePublisher("pose_pub_filtered", topic_name='pattern_pose_filtered')
    graph += [ poser['R', 'T'] >> rt2pose['R', 'T'],
               rt2pose['pose'] >> posePub['input'],
               pose_filter['R', 'T'] >> rt2pose_filtered['R', 'T'],
               rt2pose_filtered['pose'] >> posePub_filtered['input'] ]
   
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell)  # return the segmentation cell so its parameters can be tuned
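Example #1 only builds and returns the plasm; the short sketch below shows one way a caller might run it. This is an assumed usage example, not part of the original: the bag name, angle threshold and sensor string are hypothetical, segmentation_cell is only passed through by this particular function so None is used, and the scheduler class depends on the ecto version (older releases expose ecto.schedulers.Singlethreaded, newer ones ecto.Scheduler).

# Assumed usage sketch for Example #1 (hypothetical arguments)
import math
import ecto

plasm, _seg = create_capture_plasm(bag_name='capture.bag',      # hypothetical output bag
                                   angle_thresh=math.pi / 36,    # ~5 degrees between captured views
                                   segmentation_cell=None,       # only returned, never wired, in this example
                                   n_desired=36,
                                   sensor='kinect')              # selects the rgb image topic above
sched = ecto.schedulers.Singlethreaded(plasm)  # or ecto.Scheduler(plasm) on newer ecto
sched.execute()                                # runs the graph until it is interrupted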
Example #2
    # Set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15, VGA_RES, FPS_30
    source = create_source('image_pipeline',
                           'OpenNISource',
                           image_mode=VGA_RES,
                           image_fps=FPS_30)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    depth_to_3d = DepthTo3d()
    compute_normals = {}
    draw_normals = {}
    normal_types = [
        RgbdNormalsTypes.LINEMOD, RgbdNormalsTypes.FALS, RgbdNormalsTypes.SRI
    ]
    normal_types = [RgbdNormalsTypes.FALS]  # override: only FALS is actually used below
    for type in normal_types:
        compute_normals[type] = ComputeNormals(method=type)
        draw_normals[type] = DrawNormals(step=20)

    #plasm.connect(source['image'] >> rgb2gray ['image'])

    # Connect up the pose estimation
    connections = [
        source['depth'] >> depth_to_3d['depth'],
        source['K_depth'] >> depth_to_3d['K'],
        source['image'] >> imshow(name='original', waitKey=1)[:]
    ]

    # compute the normals
    for type in [RgbdNormalsTypes.FALS, RgbdNormalsTypes.SRI]:
        if type in normal_types:
            connections += [
Example #3
if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    # Set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15, VGA_RES, FPS_30
    source = create_source('image_pipeline',
                           'OpenNISource',
                           image_mode=VGA_RES,
                           image_fps=FPS_30)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(threshold=0.03, sensor_error_a=0.0075)
    depth_to_3d = DepthTo3d()
    compute_normals1 = ComputeNormals(method=RgbdNormalsTypes.FALS)
    draw_normals1 = DrawNormals(step=20)
    plane_drawer = PlaneDrawer()

    #plasm.connect(source['image'] >> rgb2gray ['image'])

    # Connect up the pose estimation
    connections = [
        source['depth_raw'] >> depth_to_3d['depth'],
        source['K'] >> depth_to_3d['K'],
        source['image'] >> imshow(name='original', waitKey=1)[:]
    ]
    connections += [
        source['K'] >> compute_normals1['K'],
        depth_to_3d['points3d'] >> compute_normals1['points3d']
    ]
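Example #3 stops after wiring the normal estimation. A hedged sketch of how such a script typically finishes is shown below: the plane_finder input names ('K', 'points3d', 'normals') are taken from the PlaneFinder usage in the other examples on this page, the drawing cells are left unconnected because their ports are not shown here, and the scheduler call is the same assumption as in the sketch after Example #1.

    # Assumed continuation of Example #3 (PlaneFinder port names taken from the other examples)
    connections += [
        source['K'] >> plane_finder['K'],
        depth_to_3d['points3d'] >> plane_finder['points3d'],
        compute_normals1['normals'] >> plane_finder['normals'],
    ]
    plasm.connect(connections)

    # run the graph; use ecto.Scheduler(plasm) instead on newer ecto versions
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()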
Example #4
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                                            orb_template='', res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; data will be written to this file.
    @param angle_thresh: The angle threshold in radians used to sparsify the views.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K_depth', 'K_image', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    graph += [ # find the normals
                source['K_depth', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K_depth', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K_image', 'mask_depth', 'points3d'] >> poser['color_image', 'K_image', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern: there may be a scale ambiguity as this is 3D only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
        graph += [ source['image', 'K_image'] >> poser['color_image', 'K_image'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using the 3D data
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K_depth'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K_image'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    # display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell)  # return the segmentation cell so its parameters can be tuned
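This variant differs from Example #1 mainly in the orb_template parameter: a non-empty template directory selects the ORB-based pose estimator, while the default empty string falls back to the opposing-dot pattern. The sketch below is an assumed invocation, not part of the original; the paths are hypothetical and seg_cell stands in for a real segmentation cell, which this variant does wire into the masker.

# Assumed usage sketch for Example #4 (hypothetical arguments)
import math

seg_cell = ...  # placeholder: must be a real segmentation cell, it is connected as 'masker' above

# A non-empty orb_template selects the ORB pose estimator; '' uses the dot pattern.
plasm, seg_cell = create_capture_plasm(bag_name='object_01.bag',              # hypothetical bag file
                                       angle_thresh=math.pi / 18,             # ~10 degrees between views
                                       segmentation_cell=seg_cell,
                                       orb_template='/path/to/orb/template',  # hypothetical template directory
                                       preview=False)                         # preview=True skips the bag writer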