def declare_io(self, p, i, o):
    # passthrough cells expose the black box's inputs to its internal graph
    self.gray_image = ecto.Passthrough('gray Input')
    self.gray_image_ref = ecto.Passthrough('gray Input from ref image')
    self.K = ecto.Passthrough('K')
    self.points3d = ecto.Passthrough('points3d')
    self.points3d_ref = ecto.Passthrough('points3d from ref image')
    self.depth_mask = ecto.Passthrough('mask')
    self.pose_estimation = MatchRefinementHSvd('Pose Estimation',
                                               reprojection_error=3,
                                               inlier_thresh=15)
    self.fps = FPSDrawer('FPS')
    self.tr = TransformCompose('Transform Composition')

    # inputs
    i.declare('K', self.K.inputs.at('in'))
    i.declare('image', self.gray_image.inputs.at('in'))
    i.declare('image_ref', self.gray_image_ref.inputs.at('in'))
    i.declare('mask', self.depth_mask.inputs.at('in'))
    i.declare('points3d', self.points3d.inputs.at('in'))
    i.declare('points3d_ref', self.points3d_ref.inputs.at('in'))

    # outputs
    o.declare('R', self.tr.outputs.at('R'))
    o.declare('T', self.tr.outputs.at('T'))
    o.declare('found', self.pose_estimation.outputs.at('found'))
    o.declare('debug_image', self.fps.outputs.at('image'))
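# Usage sketch for a black box exposing the ports declared above.  The class
# name PoseEstimationBB and the upstream cells (camera, rgb2gray, prev) are
# placeholders/assumptions for illustration; only the port names ('K', 'image',
# 'image_ref', 'mask', 'points3d', 'points3d_ref', 'R', 'T', 'found',
# 'debug_image') come from the declare_io above.
import ecto
from ecto_opencv.highgui import imshow

pose_est = PoseEstimationBB('Pose Estimation')  # hypothetical BlackBox subclass

plasm = ecto.Plasm()
plasm.connect(
    camera['K'] >> pose_est['K'],                  # calibration matrix
    rgb2gray['image'] >> pose_est['image'],        # current gray frame
    prev['image'] >> pose_est['image_ref'],        # reference gray frame
    camera['mask'] >> pose_est['mask'],            # valid-depth mask
    camera['points3d'] >> pose_est['points3d'],    # current 3d points
    prev['points3d'] >> pose_est['points3d_ref'],  # reference 3d points
    # the declared outputs become external ports: R, T, found, debug_image
    pose_est['debug_image'] >> imshow('pose debug', name='pose')['image'],
)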
def ecto_process(conn):
    import ecto
    from ecto_opencv.highgui import VideoCapture, FPSDrawer, ImageJpgWriter
    from StringIO import StringIO

    # ImageUpdator (defined elsewhere) forwards each encoded frame over 'conn'
    updator = ImageUpdator(conn=conn)
    video_cap = VideoCapture(video_device=0, width=1000, height=1000)
    fps = FPSDrawer()
    jpg_stream = StringIO()
    writer = ImageJpgWriter(file=ecto.ostream(jpg_stream))

    plasm = ecto.Plasm()
    plasm.connect(video_cap['image'] >> fps['image'],
                  fps['image'] >> writer['image'],
                  writer['file'] >> updator['ostream'],
                  )

    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
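# A minimal launch sketch (an assumption, not part of the original excerpt):
# 'conn' looks like one end of a multiprocessing Pipe, with ImageUpdator
# sending each encoded jpg through it, so the capture graph can run in a
# separate process from the consumer.
from multiprocessing import Process, Pipe

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    worker = Process(target=ecto_process, args=(child_conn,))
    worker.start()
    try:
        while True:
            frame = parent_conn.recv()  # whatever ImageUpdator sends per frame
            print 'received %d bytes' % len(frame)
    finally:
        worker.terminate()
        worker.join()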
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(
        description='Computes the odometry between frames.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()
    return options

if __name__ == '__main__':
    options = parse_args()

    video = VideoCapture(video_device=0)
    lucid = LUCID()
    draw_kpts = DrawKeypoints()
    rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)
    fps = FPSDrawer()

    plasm = ecto.Plasm()
    plasm.connect(video['image'] >> rgb2gray['image'],
                  rgb2gray['image'] >> lucid['image'],
                  lucid['keypoints'] >> draw_kpts['keypoints'],
                  video['image'] >> draw_kpts['image'],
                  draw_kpts['image'] >> fps[:],
                  fps[:] >> imshow('eLUCID display', name='LUCID')['image'],
                  )

    run_plasm(options, plasm, locals=vars())
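# Equivalent manual execution (a sketch, shown as an alternative to run_plasm
# above): when the command-line scheduler options aren't needed, the same plasm
# can be driven directly with a scheduler.
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter=100)  # cap at 100 iterations; execute() runs until stopped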
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import imshow, NiConverter, FPSDrawer
from ecto_opencv.imgproc import ConvertTo
from ecto_opencv.cv_bp import CV_8UC1
from ecto_openni import OpenNICapture, DEPTH_RGB
import argparse
from ecto.opts import cell_options

parser = argparse.ArgumentParser(description='My awesome program thing.')

# add our cell's options to the parser
camera_factory = cell_options(parser, OpenNICapture)
args = parser.parse_args()
print args

# use the factory in conjunction with the parsed arguments to create our cell
capture = camera_factory(args)
fps = FPSDrawer('fps')

plasm = ecto.Plasm()
plasm.connect(capture['image'] >> fps[:],
              fps[:] >> imshow('image display', name='image')[:],
              capture['depth'] >> imshow('depth display', name='depth')[:],
              )

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()
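# Variant sketch (an assumption, not taken from the original sample):
# cell_options can be applied to more than one cell, so that e.g. the
# FPSDrawer's parameters also become command-line flags.  Whether two cells
# can share one parser without a prefix depends on their parameter names not
# clashing, so treat this as illustrative only.
import ecto
import argparse
from ecto_opencv.highgui import imshow, FPSDrawer
from ecto_openni import OpenNICapture
from ecto.opts import cell_options

parser = argparse.ArgumentParser(description='Capture with configurable cells.')
camera_factory = cell_options(parser, OpenNICapture)
fps_factory = cell_options(parser, FPSDrawer)
args = parser.parse_args()

capture = camera_factory(args)
fps = fps_factory(args)

plasm = ecto.Plasm()
plasm.connect(capture['image'] >> fps[:],
              fps[:] >> imshow('image display', name='image')[:],
              )
ecto.schedulers.Singlethreaded(plasm).execute()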