Example #1
def declare_cells(p):
    return {'main': create_source(*('image_pipeline', 'OpenNISubscriber'), **p)}
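For comparison, the starred tuple in the snippet above is plain Python argument unpacking. Below is a minimal sketch of the equivalent direct call; the import path is an assumption based on the ecto ecosystem, not something this snippet confirms.

# Equivalent call without argument unpacking; create_source forwards any
# extra keyword arguments to the underlying cell as parameters.
from ecto_image_pipeline.io.source import create_source  # assumed import path

source = create_source('image_pipeline', 'OpenNISubscriber')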
Example #2
import argparse
import os
import time

import ecto
# NOTE: the import paths below are assumptions based on the ecto ecosystem;
# adjust them to your installation.
from ecto.opts import scheduler_options
from ecto_image_pipeline.io.source import create_source
from ecto_opencv.highgui import ImageSaver, imshow

parser = argparse.ArgumentParser(description='Saves images from a Kinect on keystroke.')
parser.add_argument('-o', '--output', dest='output',
                    help='The output directory. Default: %(default)s',
                    default='./images/%s' % time.strftime('%Y-%b-%d-%H.%M.%S'))
parser.add_argument('-d', '--device', dest='device',
                    default=0,
                    help='The video device number. Default: %(default)s')
parser.add_argument('--width', dest='width', default=640)
parser.add_argument('--height', dest='height', default=480)
parser.add_argument('--preview', action='store_true')

#ecto options
scheduler_options(parser.add_argument_group('Scheduler Options'))
args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

#camera = VideoCapture(video_device=1,width=int(args.width),height=int(args.height))
source = create_source(
    package_name='image_pipeline',
    source_type='OpenNISource')  #optionally pass keyword args here...
depth_saver = ImageSaver(
    filename_format=os.path.join(args.output, 'frame_%010d_depth.png'))
image_saver = ImageSaver(
    filename_format=os.path.join(args.output, 'frame_%010d_image.png'))
#mono_saver = ImageSaver(filename_format=os.path.join(args.output, 'frame_%010d_mono.png'))

plasm = ecto.Plasm()
plasm.connect(
    #      camera['image'] >> imshow(name='Mono')['image'],
    source['depth'] >> imshow(name='Depth')['image'],
    source['image'] >> imshow(name='RGB')['image'])

if not args.preview:
    plasm.connect(
        source['image'] >> image_saver['image'],
        source['depth'] >> depth_saver['image'],
    )
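Since the script registers scheduler flags via scheduler_options, the customary last step in ecto scripts is to hand the parsed options and the plasm to run_plasm. A minimal sketch, assuming run_plasm is the ecto.opts companion of scheduler_options:

# Execute the graph with whichever scheduler the command-line options selected.
from ecto.opts import run_plasm  # assumed to live alongside scheduler_options

run_plasm(args, plasm, locals=vars())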
Example #3
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Find a plane in an RGBD image.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    # set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15
    source = create_source('image_pipeline', 'OpenNISource', image_mode=SXGA_RES, image_fps=FPS_15)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(l=600, nu=100)
    depth_to_3d = DepthTo3d()

    plasm.connect(source['image'] >> rgb2gray['image'])

    #connect up the pose_est
    connections = [ source['image'] >> plane_finder['image'],
                    source['depth_raw'] >> depth_to_3d['depth'],
                    source['K'] >> depth_to_3d['K'],
                    depth_to_3d['points3d'] >> plane_finder['points3d'],
                    source['K'] >> plane_finder['K'] ]
    connections += [plane_finder['image'] >> imshow(name='hulls')[:],
                    source['image'] >> imshow(name='original')[:]]
    plasm.connect(connections)
Example #4
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False, preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    graph += [ # find the normals
                source['K', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K', 'mask_depth', 'points3d'] >> poser['color_image', 'K', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
        graph += [ source['image', 'K'] >> poser['color_image', 'K'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K'] >> pose_filter['K'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                                 n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
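A hedged usage sketch of create_capture_plasm: make_segmentation_cell() is a hypothetical stand-in for any cell exposing the 'points3d'/'masks'/'planes'/'T' inputs and the 'mask' output wired above, and Singlethreaded is the stock ecto scheduler.

import ecto

# Hypothetical usage sketch; make_segmentation_cell() is a placeholder for a
# real segmentation cell with the interface wired in create_capture_plasm.
seg = make_segmentation_cell()
plasm, seg_cell = create_capture_plasm('capture.bag', angle_thresh=0.17,
                                       segmentation_cell=seg, preview=True)
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()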
Example #5
def declare_cells(self, p):
    return {'main': create_source(*('image_pipeline', 'BagReader'), **p)}
Example #6
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Find a plane in an RGBD image.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    # set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15, VGA_RES, FPS_30
    source = create_source('image_pipeline', 'OpenNISource', image_mode=VGA_RES, image_fps=FPS_30)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    depth_to_3d = DepthTo3d()
    compute_normals = {}
    draw_normals = {}
    normal_types = [ RgbdNormalsTypes.LINEMOD, RgbdNormalsTypes.FALS, RgbdNormalsTypes.SRI ]
    normal_types = [ RgbdNormalsTypes.FALS ]  # override: only exercise FALS in this run
    for type in normal_types:
        compute_normals[type] = ComputeNormals(method=type)
        draw_normals[type] = DrawNormals(step=20)

    #plasm.connect(source['image'] >> rgb2gray ['image'])

    #connect up the pose_est
    connections = [ source['depth'] >> depth_to_3d['depth'],
                    source['K_depth'] >> depth_to_3d['K'] ]
    plasm.connect(connections)
Example #7
def declare_cells(self, p):
    return {"main": create_source(*("image_pipeline", "OpenNISource"), **p)}
Example #8
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False, preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K_depth', 'K_image', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    graph += [ # find the normals
                source['K_depth', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K_depth', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K_image', 'mask_depth', 'points3d'] >> poser['color_image', 'K_image', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
        graph += [ source['image', 'K_image'] >> poser['color_image', 'K_image'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K_depth'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K_image'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                                 n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
Example #9
parser = argparse.ArgumentParser(description='Saves images from a Kinect on keystroke.')
parser.add_argument('-o', '--output', dest='output', help='The output directory. Default: %(default)s', default='./images/%s' % time.strftime('%Y-%b-%d-%H.%M.%S'))
parser.add_argument('-d', '--device', dest='device', default=0, help='The video device number. Default: %(default)s')
parser.add_argument('--width', dest='width', default=640)
parser.add_argument('--height', dest='height', default=480)
parser.add_argument('--preview', action='store_true')

#ecto options
scheduler_options(parser.add_argument_group('Scheduler Options'))
args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

#camera = VideoCapture(video_device=1,width=int(args.width),height=int(args.height))
source = create_source(package_name='image_pipeline', source_type='OpenNISource') #optionally pass keyword args here...
depth_saver = ImageSaver(filename_format=os.path.join(args.output, 'frame_%010d_depth.png'))
image_saver = ImageSaver(filename_format=os.path.join(args.output, 'frame_%010d_image.png'))
#mono_saver = ImageSaver(filename_format=os.path.join(args.output, 'frame_%010d_mono.png'))

plasm = ecto.Plasm()
plasm.connect(
    #      camera['image'] >> imshow(name='Mono')['image'],
    source['depth'] >> imshow(name='Depth')['image'],
    source['image'] >> imshow(name='RGB')['image'],
)

if not args.preview:
    plasm.connect(
        source['image'] >> image_saver['image'],
        source['depth'] >> depth_saver['image'],
    )
Example #10
    parser = argparse.ArgumentParser(description="Find a plane in an RGBD image.")
    scheduler_options(parser.add_argument_group("Scheduler"))
    options = parser.parse_args()

    return options


if __name__ == "__main__":
    options = parse_args()

    plasm = ecto.Plasm()

    # setup the input source, grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15

    source = create_source("image_pipeline", "OpenNISource", image_mode=SXGA_RES, image_fps=FPS_15)
    rgb2gray = cvtColor("Grayscale", flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(l=600, nu=100)
    depth_to_3d = DepthTo3d()

    plasm.connect(source["image"] >> rgb2gray["image"])

    # connect up the pose_est
    connections = [
        source["image"] >> plane_finder["image"],
        source["depth_raw"] >> depth_to_3d["depth"],
        source["K"] >> depth_to_3d["K"],
        depth_to_3d["points3d"] >> plane_finder["points3d"],
        source["K"] >> plane_finder["K"],
    ]
    connections += [plane_finder["image"] >> imshow(name="hulls")[:], source["image"] >> imshow(name="original")[:]]
Example #11
def declare_cells(p):
    return {'main': create_source(*('image_pipeline', 'BagReader'), **p)}