def run_plasm(options, plasm, locals={}): ''' Run the plasm given the options from the command line parser. :param options: The command line, preparsed options object. It is assumed that you have filled this object using scheduler_options. :param plasm: The plasm to run. :param locals: Any local variables that you would like available to the iPython shell session. ''' global possible_schedulers if options.scheduler_type not in possible_schedulers: msg = "You must supply a valid scheduler type, not \'%s\'\n" % options.scheduler_type msg += 'Valid schedulers are:\n\t' + '\n\t'.join(possible_schedulers) + '\n' raise RuntimeError(msg) if options.graphviz: ecto.view_plasm(plasm) if len(options.dotfile) > 0: print >> open(options.dotfile, 'wt'), plasm.viz() if len(options.logfile) > 0: ecto.log_to_file(options.logfile) sched = ecto.schedulers.__dict__[options.scheduler_type](plasm) if options.ipython: use_ipython(options, sched, plasm, locals) else: sched.execute(options.niter, options.nthreads) if options.stats: print sched.stats()
def do_ecto():
    """Record synchronized mono RGB / raw depth images to a bag, with live display."""
    # One bagger per recorded topic.
    topic_baggers = dict(
        image=ImageBagger(topic_name='/camera/rgb/image_mono'),
        depth=ImageBagger(topic_name='/camera/depth/image_raw'),
    )
    writer = ecto_ros.BagWriter('Bag Writer',
                                baggers=topic_baggers,
                                bag=sys.argv[1],
                                )
    # Matching subscribers, fed through a synchronizer so frames pair up.
    topic_subs = dict(
        image=ImageSub(topic_name='/camera/rgb/image_mono', queue_size=0),
        depth=ImageSub(topic_name='/camera/depth/image_raw', queue_size=0),
    )
    synchronizer = ecto_ros.Synchronizer('Synchronizator', subs=topic_subs)
    rgb_converter = ecto_ros.Image2Mat()
    depth_converter = ecto_ros.Image2Mat()
    edges = [
        synchronizer["image"] >> rgb_converter["image"],
        rgb_converter["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
        synchronizer[:] >> writer[:],
        synchronizer["depth"] >> depth_converter["image"],
        depth_converter["image"] >> highgui.imshow("depth show", name="depth", waitKey=-1)[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(edges)
    ecto.view_plasm(plasm)
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter=30)  # capture a second
def do_ecto():
    """Rip color RGB and depth images out of a bag and display both streams."""
    rippers = dict(
        image=ImageBagger(topic_name='/camera/rgb/image_color'),
        depth=ImageBagger(topic_name='/camera/depth/image'),
    )
    bag_source = ecto_ros.BagReader('Bag Ripper',
                                    baggers=rippers,
                                    bag=sys.argv[1],
                                    )
    rgb_converter = ecto_ros.Image2Mat()
    depth_converter = ecto_ros.Image2Mat()
    connections = [
        bag_source["image"] >> rgb_converter["image"],
        rgb_converter["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
        bag_source["depth"] >> depth_converter["image"],
        depth_converter["image"] >> highgui.imshow("depth show", name="depth", waitKey=-1)[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(connections)
    ecto.view_plasm(plasm)
    # Single-threaded: runs until the bag is exhausted.
    ecto.schedulers.Singlethreaded(plasm).execute()
def mygraph(): #instantiate a plasm, our DAG structure plasm = ecto.Plasm() #instantiate processing modules r = hello_ecto.Reader() #notice the keyword args, these get mapped #as parameters p1 = hello_ecto.Printer(str="default") p2 = hello_ecto.Printer(str="default") #connect outputs to inputs plasm.connect(r, "output", p1, "str") plasm.connect(r, "output", p2, "str") if debug: #render the DAG with dot print plasm.viz() ecto.view_plasm(plasm) #an execution loop print "Enter input, q to quit" while r.outputs.output != 'q': plasm.execute() #this executes the graph in compiled code.
def do_ecto():
    """Play back RGB and depth images from a bag and show them."""
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   )
    reader = ecto_ros.BagReader('Bag Ripper', baggers=baggers, bag=sys.argv[1],)
    to_mat_rgb = ecto_ros.Image2Mat()
    to_mat_depth = ecto_ros.Image2Mat()
    wiring = [
        reader["image"] >> to_mat_rgb["image"],
        to_mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        reader["depth"] >> to_mat_depth["image"],
        to_mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:],
    ]
    graph_plasm = ecto.Plasm()
    graph_plasm.connect(wiring)
    ecto.view_plasm(graph_plasm)
    ecto.Scheduler(graph_plasm).execute()
def do_ecto():
    """Synchronize RGB/depth image + camera-info topics, print drift, display."""
    subscribers = dict(
        image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
        depth=ImageSub(topic_name='/camera/depth/image', queue_size=0),
        depth_info=CameraInfoSub(topic_name='/camera/depth/camera_info', queue_size=0),
        image_info=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
    )
    synchronizer = ecto_ros.Synchronizer('Synchronizator', subs=subscribers)
    printer = ecto_ros.DriftPrinter()
    rgb_mat = ecto_ros.Image2Mat()
    depth_mat = ecto_ros.Image2Mat()
    edges = [
        synchronizer["image"] >> rgb_mat["image"],
        rgb_mat["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
        # The whole synchronized bundle goes to the drift printer.
        synchronizer[:] >> printer[:],
        synchronizer["depth"] >> depth_mat["image"],
        depth_mat["image"] >> highgui.imshow("depth show", name="depth", waitKey=-1)[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(edges)
    ecto.view_plasm(plasm)
    ecto.schedulers.Threadpool(plasm).execute()
def calibration(rows,cols,square_size,pattern_type,n_obs,video): plasm = ecto.Plasm() pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True) rgb2gray = imgproc.cvtColor(flag=7) circle_detector = calib.PatternDetector(rows=rows, cols=cols,pattern_type=pattern_type,square_size=square_size ) ecto.print_module_doc(circle_detector) circle_drawer = calib.PatternDrawer(rows=rows, cols=cols) camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml",n_obs=n_obs) plasm.connect(video, "image", rgb2gray, "input") plasm.connect(rgb2gray, "out", circle_detector, "input") plasm.connect(video, "image", circle_drawer, "input") plasm.connect(video, "image", camera_calibrator, "image") plasm.connect(circle_detector, "out", circle_drawer, "points") plasm.connect(circle_detector, "found", circle_drawer, "found") plasm.connect(circle_drawer, "out", pattern_show, "input") plasm.connect(circle_detector, "ideal", camera_calibrator,"ideal") plasm.connect(circle_detector, "out", camera_calibrator,"points") plasm.connect(circle_detector, "found", camera_calibrator, "found") print plasm.viz() ecto.view_plasm(plasm) while(pattern_show.outputs.out != 27 and camera_calibrator.outputs.calibrated == False): plasm.execute(1)
def do_ecto():
    """Record synchronized RGB/depth frames into a bag while displaying them."""
    to_bag = dict(
        image=ImageBagger(topic_name='/camera/rgb/image_mono'),
        depth=ImageBagger(topic_name='/camera/depth/image_raw'),
    )
    recorder = ecto_ros.BagWriter('Bag Writer', baggers=to_bag, bag=sys.argv[1],)
    from_ros = dict(
        image=ImageSub(topic_name='/camera/rgb/image_mono', queue_size=0),
        depth=ImageSub(topic_name='/camera/depth/image_raw', queue_size=0),
    )
    synchronizer = ecto_ros.Synchronizer('Synchronizator', subs=from_ros)
    rgb_mat = ecto_ros.Image2Mat()
    depth_mat = ecto_ros.Image2Mat()
    edges = [
        synchronizer["image"] >> rgb_mat["image"],
        rgb_mat["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        synchronizer[:] >> recorder[:],
        synchronizer["depth"] >> depth_mat["image"],
        depth_mat["image"] >> highgui.imshow("depth show", name="depth")[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(edges)
    ecto.view_plasm(plasm)
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter=30)  # capture a second
def calibration(rows, cols, square_size, pattern_type, n_obs, video): plasm = ecto.Plasm() pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True) rgb2gray = imgproc.cvtColor(flag=7) circle_detector = calib.PatternDetector(rows=rows, cols=cols, pattern_type=pattern_type, square_size=square_size) ecto.print_module_doc(circle_detector) circle_drawer = calib.PatternDrawer(rows=rows, cols=cols) camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml", n_obs=n_obs) plasm.connect(video, "image", rgb2gray, "input") plasm.connect(rgb2gray, "out", circle_detector, "input") plasm.connect(video, "image", circle_drawer, "input") plasm.connect(video, "image", camera_calibrator, "image") plasm.connect(circle_detector, "out", circle_drawer, "points") plasm.connect(circle_detector, "found", circle_drawer, "found") plasm.connect(circle_drawer, "out", pattern_show, "input") plasm.connect(circle_detector, "ideal", camera_calibrator, "ideal") plasm.connect(circle_detector, "out", camera_calibrator, "points") plasm.connect(circle_detector, "found", camera_calibrator, "found") print plasm.viz() ecto.view_plasm(plasm) while (pattern_show.outputs.out != 27 and camera_calibrator.outputs.calibrated == False): plasm.execute(1)
def test_plasm(nlevels, cellsperlevel, niter):
    """Build an adder graph, dump its dot rendering to disk, display it,
    and run it for niter iterations on the threadpool scheduler.

    :param nlevels: number of levels passed to build_addergraph.
    :param cellsperlevel: cells per level passed to build_addergraph.
    :param niter: iterations to execute.
    """
    plasm = build_addergraph(nlevels, cellsperlevel)
    suffix = '%ulvls_%uperlevel_%uiter' % (nlevels, cellsperlevel, niter)
    # Fix: the file handle was never closed if plasm.viz() raised; the
    # with-statement guarantees closure on every path.
    with open('graph_%s.dot' % suffix, 'w') as o:
        print >> o, plasm.viz()
    ecto.view_plasm(plasm)
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter)
def do_ecto(self):
    """Wire the prepared graph into a fresh plasm, display it, and run it
    with the scheduler selected on the command line."""
    plasm = ecto.Plasm()
    plasm.connect(self.graph)
    ecto.view_plasm(plasm)
    # Scheduler choice and iteration count come from the parsed options;
    # vars() exposes this frame's locals to an eventual iPython session.
    run_plasm(self.options, plasm, locals=vars())
def test_plasm(nlevels, nthreads, niter): (plasm, outnode) = build_addergraph(nlevels) #o = open('graph.dot', 'w') #print >>o, plasm.viz() #o.close() ecto.view_plasm(plasm) sched = ecto.schedulers.Singlethreaded(plasm) sched.execute(niter) print "RESULT:", outnode.outputs.out shouldbe = float(2**nlevels * niter) print "expected:", shouldbe assert outnode.outputs.out == shouldbe
def do_ecto():
    """Relay camera RGB/depth topics to locally republished topics."""
    rgb_in = ImageSub("image_sub", topic_name="/camera/rgb/image_mono")
    depth_in = ImageSub("depth_sub", topic_name="/camera/depth/image")
    rgb_out = ImagePub("image_pub", topic_name="my_image")
    depth_out = ImagePub("depth_pub", topic_name="my_depth")
    plasm = ecto.Plasm()
    plasm.connect([rgb_in["output"] >> rgb_out["input"],
                   depth_in["output"] >> depth_out["input"]])
    ecto.view_plasm(plasm)
    ecto.schedulers.Threadpool(plasm).execute()
def do_ecto():
    """Forward mono RGB and depth image topics to republished local topics."""
    source_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    source_depth = ImageSub("depth_sub", topic_name='/camera/depth/image')
    sink_rgb = ImagePub("image_pub", topic_name='my_image')
    sink_depth = ImagePub("depth_pub", topic_name='my_depth')
    relay = [
        source_rgb["output"] >> sink_rgb["input"],
        source_depth["output"] >> sink_depth["input"],
    ]
    plasm = ecto.Plasm()
    plasm.connect(relay)
    ecto.view_plasm(plasm)
    scheduler = ecto.schedulers.Threadpool(plasm)
    scheduler.execute()
def do_ecto():
    """Republish the mono RGB and depth streams under local topic names."""
    incoming_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    incoming_depth = ImageSub("depth_sub", topic_name='/camera/depth/image')
    outgoing_rgb = ImagePub("image_pub", topic_name='my_image')
    outgoing_depth = ImagePub("depth_pub", topic_name='my_depth')
    plasm = ecto.Plasm()
    plasm.connect([
        incoming_rgb["output"] >> outgoing_rgb["input"],
        incoming_depth["output"] >> outgoing_depth["input"],
    ])
    ecto.view_plasm(plasm)
    ecto.Scheduler(plasm).execute()
def do_ecto():
    """Display live RGB and depth streams converted from ROS image messages."""
    rgb_source = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    depth_source = ImageSub("depth_sub", topic_name='/camera/depth/image')
    rgb_to_mat = ecto_ros.Image2Mat()
    depth_to_mat = ecto_ros.Image2Mat()
    plasm = ecto.Plasm()
    plasm.connect([
        rgb_source["output"] >> rgb_to_mat["image"],
        rgb_to_mat["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        depth_source["output"] >> depth_to_mat["image"],
        depth_to_mat["image"] >> highgui.imshow("depth show", name="depth")[:],
    ])
    ecto.view_plasm(plasm)
    ecto.Scheduler(plasm).execute()
def do_ecto():
    """Show the RGB and depth camera streams, scheduled on the threadpool."""
    rgb_topic = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    depth_topic = ImageSub("depth_sub", topic_name='/camera/depth/image')
    rgb_converter = ecto_ros.Image2Mat()
    depth_converter = ecto_ros.Image2Mat()
    viewing = [
        rgb_topic["output"] >> rgb_converter["image"],
        rgb_converter["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        depth_topic["output"] >> depth_converter["image"],
        depth_converter["image"] >> highgui.imshow("depth show", name="depth")[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(viewing)
    ecto.view_plasm(plasm)
    scheduler = ecto.schedulers.Threadpool(plasm)
    scheduler.execute()
def mygraph(): #instantiate a plasm, our DAG structure plasm = ecto.Plasm() #instantiate processing modules r = hello_ecto.Reader() #notice the keyword args, these get mapped #as parameters p1 = hello_ecto.Printer(str="default") p2 = hello_ecto.Printer(str="default") #connect outputs to inputs plasm.connect(r["output"] >> (p1["str"], p2["str"])) if debug: #render the DAG with dot ecto.view_plasm(plasm) #an execution loop print "Enter input, q to quit" while r.outputs.output != 'q': plasm.execute() #this executes the graph in compiled code.
def run_plasm(options, plasm, locals={}): ''' Run the plasm given the options from the command line parser. :param options: The command line, preparsed options object. It is assumed that you have filled this object using scheduler_options. :param plasm: The plasm to run. :param locals: Any local variables that you would like available to the iPython shell session. ''' tail_shell = None if options.devel: options.graphviz = True options.stats = True options.logfile = '/tmp/ecto.log' options.ipython = True if options.graphviz: ecto.view_plasm(plasm) if len(options.dotfile) > 0: print >> open(options.dotfile, 'wt'), plasm.viz() if len(options.logfile) > 0: ecto.log_to_file(options.logfile) if options.devel: command = "/usr/bin/uxterm -T /tmp/ecto.log -e tail -f %s" % options.logfile print 'opening devel shell: %s' % command tail_shell = subprocess.Popen(command, shell=True) if options.gui: from ecto.gui import gui_execute gui_execute(plasm) else: sched = ecto.Scheduler(plasm) if options.ipython: use_ipython(options, sched, plasm, locals) else: sched.execute(options.niter) if options.stats: print sched.stats()
def run_plasm(options, plasm, locals={}): ''' Run the plasm given the options from the command line parser. :param options: The command line, preparsed options object. It is assumed that you have filled this object using scheduler_options. :param plasm: The plasm to run. :param locals: Any local variables that you would like available to the iPython shell session. ''' if options.graphviz: ecto.view_plasm(plasm) if len(options.dotfile) > 0: print >> open(options.dotfile, 'wt'), plasm.viz() if len(options.logfile) > 0: ecto.log_to_file(options.logfile) if options.gui: from ecto.gui import gui_execute gui_execute(plasm) else: sched = ecto.Scheduler(plasm) if options.ipython: use_ipython(options, sched, plasm, locals) else: sched.execute(options.niter) if options.stats: print sched.stats()
def do_ecto():
    """Publish images read from a directory onto a ROS image topic,
    showing each one as it goes out."""
    # ecto options: the scheduler flags are appended to our own parser.
    parser = argparse.ArgumentParser(description='Publish images from directory.')
    scheduler_options(parser)
    args = parser.parse_args()

    # This will read all images found under the path.
    path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
    images = ImageReader(path=os.path.expanduser(path))
    mat2image = ecto_ros.Mat2Image(encoding='rgb8')
    pub_rgb = ImagePub("image_pub", topic_name='/camera/rgb/image_raw')
    display = imshow(name='image', waitKey=5000)

    plasm = ecto.Plasm()
    plasm.connect(images['image'] >> mat2image['image'],
                  images['image'] >> display['image'],
                  mat2image['image'] >> pub_rgb['input'],
                  )
    ecto.view_plasm(plasm)
    # Names are kept as-is: vars() hands this frame's locals to iPython.
    run_plasm(args, plasm, locals=vars())
#!/usr/bin/env python # # Simple vid cap # import ecto from ecto_opencv import highgui, calib, imgproc plasm = ecto.Plasm() video_cap = highgui.VideoCapture(video_device=0) fps = highgui.FPSDrawer() video_display = highgui.imshow('imshow', name='video_cap', waitKey=2) saver = highgui.VideoWriter("saver", video_file='ecto.avi', fps=15) plasm.connect(video_cap['image'] >> fps['image'], fps['image'] >> video_display['input'], fps['image'] >> saver['image'], ) if __name__ == '__main__': ecto.view_plasm(plasm) sched = ecto.schedulers.Threadpool(plasm) sched.execute()
# NOTE(review): fragment — these arguments continue a detector-construction
# call (presumably calib.PatternDetector) whose opening line is outside this
# chunk; video_cap, rgb2gray, fps, plasm and sched are also defined elsewhere.
                                 cols=3, pattern_type="acircles",
                                 square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show', name='Pattern', waitKey=2,
                                maximize=True)
pose_calc = calib.FiducialPoseFinder('Pose Calc')
pose_draw = calib.PoseDrawer('Pose Draw')
camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

# Detection runs on the grayscale frame; the pose is drawn over the frame
# with the detected pattern overlaid, then FPS-stamped and displayed.
plasm.connect(video_cap['image'] >> circle_drawer['input'],
              circle_drawer['out'] >> pose_draw['image'],
              pose_draw['output'] >> fps['image'],
              fps['image'] >> circle_display['input'],
              video_cap['image'] >> rgb2gray['input'],
              rgb2gray['out'] >> circle_detector['input'],
              circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
              camera_info['K'] >> (pose_calc['K'], pose_draw['K']),
              circle_detector['out', 'ideal', 'found'] >> pose_calc['points', 'ideal', 'found'],
              pose_calc['R', 'T'] >> pose_draw['R', 'T'])

ecto.view_plasm(plasm)
sched.execute()
def do_projector(): options = parse_options() # define the input subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0), depth=ImageSub(topic_name='camera/depth/image', queue_size=0), depth_info=CameraInfoSub(topic_name='camera/depth/camera_info', queue_size=0), image_info=CameraInfoSub(topic_name='camera/rgb/camera_info', queue_size=0), ) sync = ecto_ros.Synchronizer('Synchronizator', subs=subs) im2mat_rgb = ecto_ros.Image2Mat() im2mat_depth = ecto_ros.Image2Mat() invert = imgproc.BitwiseNot() brg2rgb = imgproc.cvtColor('bgr -> rgb', flag=4) camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml") pattern_draw = projector.PatternProjector() circle_detector = calib.PatternDetector('Dot Detector', rows=5, cols=3, pattern_type="acircles", square_size=0.04) circle_drawer = calib.PatternDrawer('Circle Draw', rows=5, cols=3) calibrator = projector.Calibrator(); s1 = ecto.Strand() main_display = highgui.imshow("rgb show", name="rgb", waitKey=5, strand=s1) graph = [ sync["image"] >> im2mat_rgb["image"], im2mat_rgb["image"] >> main_display[:], sync["depth"] >> im2mat_depth["image"], im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1, strand=s1)[:], im2mat_rgb["image"] >> invert[:], invert[:] >> circle_detector["input"], im2mat_rgb["image"] >> brg2rgb["input"], brg2rgb["out"] >> circle_drawer['input'], circle_detector['out', 'found'] >> circle_drawer['points', 'found'], circle_drawer["out"] >> highgui.imshow("pattern show", name="pattern", waitKey= -1, strand=s1)[:], # sync["image","depth"] >> pattern_detection['image', 'depth'], # pattern_detection['points'] >> projection_estimator['points'] ] print "Press 's' to capture a view." 
# add the display of the pattern video_display = highgui.imshow('pattern', name='video_cap', waitKey=2, maximize=False, autoSize=True) graph += [pattern_draw['pattern'] >> video_display['input'], pattern_draw['points'] >> calibrator['pattern'], circle_detector['out', 'found'] >> calibrator['points', 'found'], camera_info['K'] >> calibrator['K'], main_display['out'] >> calibrator['trigger'], im2mat_depth["image"] >> calibrator['depth'] ] plasm = ecto.Plasm() plasm.connect(graph) # display DEBUG data if needed if DEBUG: print plasm.viz() ecto.view_plasm(plasm) # execute the pipeline sched = ecto.schedulers.Singlethreaded(plasm) sched.execute()
# NOTE(review): fragment — 'args' and the imports (rospkg, rospy, ecto,
# plasm_yaml_factory, VisionServer, ComputeECGraph) are defined outside this chunk.
rospack = rospkg.RosPack()
# Absolute/relative paths are taken as-is; bare names resolve inside the
# ecto_rbo_yaml package's data/ directory.
if any([args.yaml_file.startswith(x) for x in [".", "/"]]):
    yml_file = [args.yaml_file]
else:
    yml_file = map(
        lambda x: os.path.join(rospack.get_path("ecto_rbo_yaml"), "data/", x),
        [args.yaml_file])

ecto_plasm, ecto_cells = plasm_yaml_factory.load(yml_file, debug=args.debug)

rospy.loginfo("Plasm initialized: %s" % (yml_file))

if args.showgraph:
    ecto.view_plasm(
        ecto_plasm,
        yml_file if type(yml_file) == str else " ".join(yml_file))

#while not rospy.is_shutdown():
server = VisionServer(ecto_plasm, ecto_cells)
if args.service:
    # Service mode: block in the ROS event loop and answer requests.
    run_sub = rospy.Service('compute_ec_graph', ComputeECGraph,
                            server.handle_compute_ec_graph)
    rospy.spin()
else:
    # One-shot mode: run the scheduler to completion.
    server.ecto_scheduler.execute()
    # NOTE(review): indentation reconstructed — this print is placed in the
    # non-service branch since rospy.spin() above does not return; confirm.
    print(server.ecto_scheduler.stats())
# NOTE(review): fragment — use_icp, restarts, sub_graph, sub_plasm,
# edge_detector, segmenter, apply_roi, noise_adder, model_loader and
# debug_graphs are all defined outside this chunk, which also ends mid-call.
if use_icp:
    depth_drawer = ecto_opencv.highgui.imshow("Drawer",name="depth edges", waitKey=10)
    sub_graph += [edge_detector[:] >> depth_drawer[:]]
if restarts > 0:
    seg2mat = ecto_corrector.SegmentsToMat("Seg2Mat",min_size=10)
    seg_drawer = ecto_opencv.highgui.imshow("Drawer",name="segments", waitKey=10)
    sub_graph += [
        segmenter["valid_segments"] >> seg2mat["segments"],
        apply_roi[:] >> seg2mat["input"],
        seg2mat[:] >> seg_drawer[:],
    ]
sub_plasm.connect(sub_graph)
if(debug_graphs):
    ecto.view_plasm(sub_plasm)

#conditional executer for correction subplams
# Wraps the sub-plasm so it can run as a single cell, gated by ecto.If.
executer = ecto_X.Executer(plasm=sub_plasm, niter=1, outputs={},
                           inputs={"in_pose":noise_adder,"ply_file":model_loader})
correction_subgraph_if = ecto.If('Correction if success',cell=executer)

""" Main Plasm """
main_plasm = ecto.Plasm()

#triggering
sub_image = ecto_sensor_msgs.Subscriber_Image("Image Subscriber",
def do_projector(): options = parse_options() # define the input subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0), depth=ImageSub(topic_name='camera/depth/image', queue_size=0), depth_info=CameraInfoSub(topic_name='camera/depth/camera_info', queue_size=0), image_info=CameraInfoSub(topic_name='camera/rgb/camera_info', queue_size=0), ) sync = ecto_ros.Synchronizer('Synchronizator', subs=subs) im2mat_rgb = ecto_ros.Image2Mat() im2mat_depth = ecto_ros.Image2Mat() invert = imgproc.BitwiseNot() brg2rgb = imgproc.cvtColor('bgr -> rgb', flag=4) bgr2gray = imgproc.cvtColor('bgr -> gray', flag=7) camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml") calibrator = projector.Calibrator() plasm = ecto.Plasm() offset_x = 0 pose_from_fiducial = PoseFromFiducial(plasm, rows=5, cols=3, pattern_type="acircles", square_size=0.04, offset_x=offset_x, debug=DEBUG) pose_from_plane = projector.PlaneFitter() s1 = ecto.Strand() main_display = highgui.imshow("rgb show", name="rgb", waitKey=5, strand=s1) graph = [ sync["image"] >> im2mat_rgb["image"], im2mat_rgb["image"] >> main_display[:], sync["depth"] >> im2mat_depth["image"], im2mat_depth["image"] >> (highgui.imshow("depth show", name="depth", waitKey= -1, strand=s1)[:],pose_from_plane['depth']), im2mat_rgb["image"] >> (brg2rgb["input"], bgr2gray["input"]), brg2rgb["out"] >> pose_from_fiducial['color_image'], bgr2gray["out"] >> pose_from_fiducial['image'], camera_info['K'] >> (pose_from_fiducial['K'],pose_from_plane['K']), pose_from_fiducial['debug_image'] >> highgui.imshow("pattern show", name="pattern", waitKey= -1, strand=s1)[:], # sync["image","depth"] >> pattern_detection['image', 'depth'], # pattern_detection['points'] >> projection_estimator['points'] ] print "Press 's' to capture a view." 
pose_draw = calib.PoseDrawer('Plane Pose Draw') # add the display of the pattern video_display = highgui.imshow('pattern', name='video_cap', waitKey=2, maximize=False, autoSize=True) warper = projector.FiducialWarper(projection_file='projector_calibration.yml') graph += [pose_from_fiducial['R', 'T', 'found'] >> (warper['R', 'T', 'found'],), pose_from_plane['R','T'] >> pose_draw['R', 'T'], camera_info['K'] >> pose_draw['K'], pose_from_fiducial['debug_image'] >> pose_draw['image'], pose_draw['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:], ] plasm.connect(graph) # display DEBUG data if needed if DEBUG: print plasm.viz() ecto.view_plasm(plasm) # execute the pipeline sched = ecto.schedulers.Singlethreaded(plasm) sched.execute()