def declare_cells(_p):
    """
    Implement the virtual function from the base class.
    Only cells from which something is forwarded have to be declared.
    """
    cells = {}
    cells['throttle'] = ecto.TrueEveryN("Throttle", n=1)
    cells['mat2image'] = ecto_ros.Mat2Image("Mat2Image")
    cells['subscribed_mat2image'] = ecto.If(
        "Subscribed Mat2Image",
        input_tendril_name="has_subscribers",
        cell=cells['mat2image'])
    cells['throttled_mat2image'] = ecto.If(
        "Throttled Mat2Image",
        input_tendril_name="throttle",
        cell=cells['subscribed_mat2image'])
    cells['publisher'] = ecto_sensor_msgs.Publisher_Image(
        "Publisher",
        topic_name="/images/candidate_beans",
        queue_size=2,
        latched=True)
    cells['throttled_publisher'] = ecto.If(
        "Throttled Publisher",
        input_tendril_name="throttle",
        cell=cells['publisher'])
    subscribers_input, subscribers_output = ecto.EntangledPair(
        value=cells['throttled_mat2image'].inputs.at('has_subscribers'),
        source_name="Has Subscribers Input",
        sink_name="Has Subscribers Output")
    cells['subscribers_input'] = subscribers_input
    cells['subscribers_output'] = subscribers_output
    return cells
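
# A rough plain-Python model of the gating chain above (assumptions: ecto.If
# runs its wrapped cell only when the named gate tendril is True, and
# TrueEveryN(n=1) raises its flag on every iteration):
def tick(has_subscribers, iteration, throttle_n=1):
    throttle = (iteration % throttle_n == 0)      # the 'throttle' cell, n=1
    run_mat2image = throttle and has_subscribers  # the two nested Ifs
    run_publisher = throttle                      # 'throttled_publisher'
    return run_mat2image, run_publisher

assert tick(True, 0) == (True, True)    # subscribers present: convert and publish
assert tick(False, 0) == (False, True)  # no subscribers: skip the conversion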
Example #2
def test_If():
    plasm = ecto.Plasm()
    g = ecto_test.Generate("Generator", step=1.0, start=1.0)
    If = ecto.If(cell=g)
    truer = ecto.TrueEveryN(n=3, count=3)
    plasm.connect(truer['flag'] >> If['__test__'])
    plasm.execute(niter=27)
    assert g.outputs.out == 9  # should have only called execute 9 times
def test_nested_if():
    plasm = ecto.Plasm()
    g = ecto_test.Generate("Generator", step=1.0, start=1.0)
    inside_if = ecto.If(input_tendril_name="on_threes", cell=g)
    outside_if = ecto.If(input_tendril_name="on_twos", cell=inside_if)
    truer_on_threes = ecto.TrueEveryN(n=3, count=0)
    truer_on_twos = ecto.TrueEveryN(n=2, count=0)
    plasm.connect([
        truer_on_threes['flag'] >> outside_if['on_threes'],
        truer_on_twos['flag'] >> outside_if['on_twos']
    ])
    # Uncomment to trace the execution count per iteration:
    # for x in range(0, 18):
    #     plasm.execute(niter=1)
    #     print("No of times executed: %s of %s" % (g.outputs.out, x))

    # Executes on the very first iteration (count = 0) and once every
    # 3*2 = 6 iterations thereafter.
    plasm.execute(niter=18)
    assert g.outputs.out == 3  # should have only called execute 3 times
    plasm.execute(niter=1)
    assert g.outputs.out == 4  # should have executed once more
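
# The counting in both tests can be checked with a plain-Python model of the
# cells (an assumption about their behavior, not ecto's actual source):
# TrueEveryN raises its flag whenever its running count is a multiple of n,
# and ecto.If executes the wrapped cell only when every gate input is True.
def simulate(niter, gates):
    counts = [start for _, start in gates]
    executed = 0
    for _ in range(niter):
        flags = [c % n == 0 for c, (n, _) in zip(counts, gates)]
        counts = [c + 1 for c in counts]
        if all(flags):
            executed += 1
    return executed

assert simulate(27, [(3, 3)]) == 9          # matches test_If
assert simulate(18, [(3, 0), (2, 0)]) == 3  # matches test_nested_if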
Example #4
    parser.add_argument('-i', '--input',
                        dest='input',
                        help='The input directory. Default: %(default)s',
                        default='./images')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()
    return options


options = parse_args()

images = ImageReader(path=options.input)
orb_m = ORB(n_features=5000, n_levels=10)
rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)
_stats = ORBstats()
stats = ecto.If(cell=_stats)
stats.inputs.__test__ = False
accum = DescriptorAccumulator()
plasm = ecto.Plasm()
plasm.connect(
    images['image'] >> rgb2gray['image'],
    rgb2gray['image'] >> orb_m['image'],
    orb_m['descriptors'] >> accum[:],
    accum[:] >> stats['descriptors'],
)

run_plasm(options, plasm, locals=vars())
_stats.process()
hist = _stats.outputs.distances.toarray()
np.savetxt('hist.txt', hist)
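
# Note the pattern here: stats wraps ORBstats in ecto.If with __test__ forced
# False, so the cell never fires while the plasm runs; its process() is then
# called once by hand after run_plasm returns. A quick sanity check of the
# saved histogram (hypothetical follow-up; assumes hist.txt was written):
#
# hist = np.loadtxt('hist.txt')
# print(hist.shape, hist.sum())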
Example #5
plasm.connect(ir_reader['image'] >> (ir_detector[:], ir_drawer['input']),
              rgb_reader['image'] >> (rgb_detector[:], rgb_drawer['input']),
              ir_detector['found', 'out'] >> ir_drawer['found', 'points'],
              rgb_detector['found', 'out'] >> rgb_drawer['found', 'points'],
              rgb_drawer[:] >> rgb_display[:],
              ir_drawer[:] >> imshow(name='Depth points')[:])

# triggers
ander = ecto.And(ninput=2)
plasm.connect(
    rgb_detector['found'] >> ander['in1'],
    ir_detector['found'] >> ander['in2'],
)

left_points = ecto.If('Left Points', cell=Points2DAccumulator())
right_points = ecto.If('Right Points', cell=Points2DAccumulator())
object_points = ecto.If('Object Points', cell=Points3DAccumulator())

calibrator_ = StereoCalibration()
calibrator = ecto.If('calibrator', cell=calibrator_)

plasm.connect(
    rgb_detector['out'] >> left_points['points'],
    rgb_detector['ideal'] >> object_points['points'],
    ir_detector['out'] >> right_points['points'],
    ander[:] >> (left_points['__test__'], right_points['__test__'],
                 object_points['__test__']),
    left_points['stacked'] >> calibrator['points_left'],
    object_points['stacked'] >> calibrator['points_object'],
)
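
# A plain-Python model of the trigger wiring above: points are accumulated
# only on frames where BOTH detectors report found, since the And cell's
# output gates all three If-wrapped accumulators.
frames = [(True, True), (True, False), (False, True), (True, True)]
accumulated = sum(1 for rgb_found, ir_found in frames if rgb_found and ir_found)
assert accumulated == 2  # only frames seen by both cameras are kept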
Example #6
                    help='The output directory. Default: %(default)s',
                    default='./images')

# ecto options
scheduler_options(parser.add_argument_group('Scheduler Options'))
args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

source = create_source(
    package_name='image_pipeline',
    source_type='OpenNISource')  # optionally pass keyword args here...

depth_saver = ecto.If(
    'depth saver',
    cell=ImageSaver(
        filename_format=os.path.join(args.output, 'depth%04d.png')))
image_saver = ecto.If(
    'rgb saver',
    cell=ImageSaver(
        filename_format=os.path.join(args.output, 'image%04d.png')))

display = imshow(name='RGB', triggers=dict(save=ord('s')))

plasm = ecto.Plasm()
plasm.connect(
    source['image'] >> (display['image'], image_saver['image']),
    source['depth'] >> (imshow(name='Depth')['image'], depth_saver['image']),
    display['save'] >> (image_saver['__test__'], depth_saver['__test__']))

run_plasm(args, plasm, locals=vars())
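
# filename_format is a printf-style pattern, so the Nth press of 's' writes a
# zero-padded frame index; a quick plain-Python check of the naming:
assert 'image%04d.png' % 5 == 'image0005.png'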
Example #7
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                         sensor, res=SXGA_RES, fps=FPS_30, orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot
    pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # Create ROS node, subscribing to openni topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    # openni & freenect require '/camera/rgb/image_rect_color'; openni2 just needs '/camera/rgb/image_raw'
    if sensor in ['kinect']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_color')
    elif sensor in ['xtion']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
    # Convert ROS topics to CV types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:] ]
    """            
    rgb_display = highgui.imshow(name='rgb')
    depth_display = highgui.imshow(name='depth')
    graph += [image2cv[:] >> rgb_display[:],
              depth2cv[:] >> depth_display[:],
            ]
            
    #   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ msg2cloud[:] >> viewer[:] ]
    """   
    # Process pointcloud

    # Cut off some parts
    
    cut_x = PassThrough(filter_field_name="x", 
                        filter_limit_min=-0.2,
                        filter_limit_max=0.2)
                        
    cut_y = PassThrough(filter_field_name="y", 
                        filter_limit_min=-0.5,
                        filter_limit_max=0.5)
    cut_z = PassThrough(filter_field_name="z",    
                        filter_limit_min=-0.0,
                        filter_limit_max=1.0)
                        
    graph += [ msg2cloud[:] >> cut_x[:],
               cut_x[:] >> cut_y[:],
               cut_y[:] >> cut_z[:] ]
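    # (each PassThrough keeps only the points whose named coordinate lies in
    # [filter_limit_min, filter_limit_max]; chained over x, y and z this crops
    # the cloud to a 0.4 x 1.0 x 1.0 m box in front of the camera)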
               
    # Voxel filter
    
    voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.002)
    
    graph += [ cut_z[:] >> voxel_grid[:] ]
               
    # Find plane and delete it
    
    normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
    
    graph += [ voxel_grid[:] >> normals[:] ]
    
    plane_finder = SACSegmentationFromNormals("planar_segmentation",
                                                 model_type=SACMODEL_NORMAL_PLANE,
                                                 eps_angle=0.09, distance_threshold=0.1)
    
    plane_deleter = ExtractIndices(negative=True)
    
    graph += [ voxel_grid[:] >> plane_finder['input'],
               normals[:] >> plane_finder['normals'] ]
    graph += [ voxel_grid[:]  >> plane_deleter['input'],
               plane_finder['inliers'] >> plane_deleter['indices'] ]
#   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ plane_deleter[:] >> viewer[:] ]
    
#   Compute vfh
    
    vfh_signature = VFHEstimation(radius_search=0.01)
    
    graph += [ plane_deleter[:] >>  vfh_signature['input'],
               normals[:] >> vfh_signature['normals'] ]                                                          
    
    # convert the image to grayscale                                                                      
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [image2cv[:]  >> rgb2gray[:] ]
                
    # get a pose use the dot pattern: there might be a scale ambiguity as this is 3d only           
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               rgb2gray[:] >> poser['image'] ]  # poser gives us R & T from camera to dot pattern
    
    
    points3d = calib.DepthTo3d()
    
    graph += [ depth_info2cv['K'] >> points3d['K'],
               depth2cv['image'] >> points3d['depth']
             ]
    
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    
    graph += [ # find the normals
                depth_info2cv['K'] >> compute_normals['K'],
                points3d['points3d'] >> compute_normals['points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                depth_info2cv['K'] >> plane_est['K'],
                points3d['points3d'] >> plane_est['points3d'] ]
    

    
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ depth_info2cv['K'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]
               
    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(
                             angle_thresh=angle_thresh, n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]
    
    trainer_ = Trainer()
    
    graph += [ pose_filter['R', 'T'] >> trainer_['R', 'T'],
               vfh_signature['output'] >> trainer_['features'],
               delta_pose['last'] >> trainer_['last'],
               delta_pose['novel'] >> trainer_['novel'],
               plane_deleter[:] >> trainer_['input'] ]
 
    novel = delta_pose['novel']

    cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

    graph += [ plane_deleter[:] >> cloud2msg[:] ]   
    
    baggers = dict(points=PointCloudBagger(topic_name='/camera/depth_registered/points'),
                   pose=PoseBagger(topic_name='/camera/pose')
                   )
                   
    bagwriter = ecto.If('Bag Writer if R|T', cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
    pcd_writer = ecto.If('msd pcd writer', cell=PCDWriter())
    
    graph += [ cloud2msg[:] >> bagwriter['points'],
               poseMsg['pose'] >> bagwriter['pose'] ]
    graph += [ plane_deleter['output'] >> pcd_writer['input'] ]
    
    delta_pose.inputs.__test__ = True
    pcd_writer.inputs.__test__ = True
    
    graph += [ novel >> (bagwriter['__test__'], pcd_writer['__test__']) ]
    
    rt2pose = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    rt2pose_filtered = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    posePub = PosePublisher("pose_pub", topic_name='pattern_pose')
    posePub_filtered = PosePublisher("pose_pub_filtered", topic_name='pattern_pose_filtered')
    graph += [ poser['R', 'T'] >> rt2pose['R', 'T'],
               rt2pose['pose'] >> posePub['input'],
               pose_filter['R', 'T'] >> rt2pose_filtered['R', 'T'],
               rt2pose_filtered['pose'] >> posePub_filtered['input'] ]
   
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
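
# The idiom used throughout this example — accumulate (output >> input)
# connections in a plain list and hand the whole list to plasm.connect once
# at the end — is equivalent to many separate connect calls. A minimal sketch
# of the same pattern (reusing only cells already shown above):
#
# graph = []
# graph += [ cloud_sub['output'] >> msg2cloud[:] ]
# graph += [ msg2cloud[:] >> cut_x[:] ]
# plasm = ecto.Plasm()
# plasm.connect(graph)  # same wiring as connecting each pair individually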
Example #8
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30, orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot
    pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K_depth', 'K_image', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    graph += [ # find the normals
                source['K_depth', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K_depth', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K_image', 'mask_depth', 'points3d'] >> poser['color_image', 'K_image', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose use the dot pattern: there might be a scale ambiguity as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                         pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                         square_size=0.04, debug=True)
        graph += [ source['image', 'K_image'] >> poser['color_image', 'K_image'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K_depth'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K_image'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(
                             angle_thresh=angle_thresh, n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
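
# A plain-Python model of the turn-table gating above (assuming ecto.And
# emits the conjunction of its inputs): a view reaches the bag writer only
# when the turn table triggered this frame AND DeltaRT judged the pose novel.
def bag_this_frame(table_trigger, pose_is_novel, use_turn_table=True):
    if not use_turn_table:
        return pose_is_novel  # delta_pose.inputs.__test__ is simply forced True
    return table_trigger and pose_is_novel

assert bag_this_frame(True, True) is True
assert bag_this_frame(False, True) is False  # table idle: nothing is bagged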
Example #9
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer, ImageSaver

video_cap = VideoCapture(video_device=0)
fps = FPSDrawer()
video_display = imshow(name='video_cap', triggers=dict(save=ord('s')))
saver = ecto.If(
    cell=ImageSaver('saver', filename_format='ecto_image_%05d.jpg', start=1))

plasm = ecto.Plasm()
plasm.connect(
    video_cap['image'] >> fps['image'],
    fps['image'] >> video_display['image'],
    video_display['save'] >> saver['__test__'],
    fps['image'] >> saver['image'],
)

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Save images from video stream.')
Example #10
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, FPSDrawer, imshow, ImageSaver
from ecto_opencv.imgproc import BilateralFilter

video_cap = VideoCapture(video_device=0)

fps = FPSDrawer()

video_display = imshow(
    'imshow',
    name='video_cap',
    triggers=dict(save=ord('s')),
)

saver = ecto.If(
    cell=ImageSaver('saver', filename_format='bilateral_%05d.jpg', start=1))

bl_begin = None
bl_end = None

plasm = ecto.Plasm()
# Build up a chain of bilateral filters.
for i in range(1, 10):
    next_filter = BilateralFilter(d=-1, sigmaColor=10, sigmaSpace=5)
    if bl_begin is None:  # beginning case
        bl_begin = next_filter
        bl_end = next_filter
        continue
    plasm.connect(bl_end[:] >> next_filter[:])
    bl_end = next_filter
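
# The loop above is the usual linear-chaining idiom; an equivalent sketch
# with explicit pairs (same behavior, just a different style):
#
# filters = [BilateralFilter(d=-1, sigmaColor=10, sigmaSpace=5)
#            for _ in range(9)]
# for a, b in zip(filters, filters[1:]):
#     plasm.connect(a[:] >> b[:])
# bl_begin, bl_end = filters[0], filters[-1]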
Example #11
        for x in files:
            if '.png' in x:
                num = x.split('_')[1].split('.')[0]
                if num.isdigit():
                    return int(num) + 1
    return 0


rgb_start = init_image_dir('rgb')
ir_start = init_image_dir('ir')
print 'rgb start', rgb_start, 'ir_start', ir_start
if rgb_start != ir_start:
    print 'please ensure that the images in ir and rgb are aligned lexicographically'
    sys.exit(1)
# saving images
rgb_saver = ecto.If(
    cell=ImageSaver(filename_format='rgb/rgb_%04d.png', start=rgb_start))
ir_saver = ecto.If(
    cell=ImageSaver(filename_format='ir/ir_%04d.png', start=ir_start))

plasm.connect(rgb2gray[:] >> rgb_saver['image'],
              conversion[:] >> ir_saver['image'],
              ander[:] >> (rgb_saver['__test__'], ir_saver['__test__']))

sched = ecto.schedulers.Singlethreaded(plasm)
while sched.execute(niter=5) == 0:
    # swap capture stream modes for the next burst of iterations
    next_mode, capture.params.stream_mode = capture.params.stream_mode, next_mode

print sched.stats()
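
# A plain-Python model of the mode-swapping loop above: every 5 scheduler
# iterations the two mode values are exchanged, alternating the capture
# stream between (for instance) RGB and IR.
mode, pending = 'RGB', 'IR'
history = []
for _ in range(4):
    history.append(mode)
    pending, mode = mode, pending
assert history == ['RGB', 'IR', 'RGB', 'IR']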