def __init__(self, plasm, rows, cols, pattern_type, square_size,
             offset_x=0, offset_y=0, offset_z=0, debug=True):
    ecto.BlackBox.__init__(self, plasm)
    self.video_cap = ecto.Passthrough('Image Input')
    self.camera_info = ecto.Passthrough('K')
    self.circle_detector = calib.PatternDetector('Dot Detector',
                                                 rows=rows, cols=cols,
                                                 pattern_type=pattern_type,
                                                 square_size=square_size,
                                                 offset_x=offset_x,
                                                 offset_y=offset_y,
                                                 offset_z=offset_z)
    self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
    self.debug = debug
    self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=rows, cols=cols)
    self.pose_draw = calib.PoseDrawer('Pose Draw')

def __init__(self, plasm, rows, cols, pattern_type, square_size, debug=True):
    ecto.BlackBox.__init__(self, plasm)
    self.video_cap = ecto.Passthrough('Image Input')
    self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
    self.circle_detector = calib.PatternDetector('Dot Detector',
                                                 rows=rows, cols=cols,
                                                 pattern_type=pattern_type,
                                                 square_size=square_size)
    self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
    self.camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")
    self.debug = debug
    if self.debug:
        self.fps = highgui.FPSDrawer()
        self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=rows, cols=cols)
        self.circle_display = highgui.imshow('Pattern show', name='Pattern',
                                             waitKey=2, autoSize=True)
        self.pose_draw = calib.PoseDrawer('Pose Draw')

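# Hedged wiring sketch for the debug cells above, mirroring the connection
# pattern the standalone fragments below use for the same cells; that this
# BlackBox connects them identically (and that the Passthrough output port is
# named 'out') is an assumption, not the author's exact graph:
plasm.connect(self.video_cap['out'] >> self.circle_drawer['input'],
              self.circle_drawer['out'] >> self.pose_draw['image'],
              self.pose_draw['output'] >> self.fps['image'],
              self.fps['image'] >> self.circle_display['input'])
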
def declare_io(self, p, i, o):
    self.gray_image = ecto.Passthrough('gray Input')
    self.rgb_image = ecto.Passthrough('rgb Input')
    self.camera_info = ecto.Passthrough('K')
    self.gather = calib.GatherPoints("gather", N=2)
    # TODO: parameterize the quantizer
    # abuses saturated arithmetic, see
    # http://opencv.itseez.com/modules/core/doc/intro.html?highlight=saturated
    self.quantizer = imgproc.Quantize('Quantizer', alpha=1, beta=0)
    self.invert = imgproc.BitwiseNot()
    self.debug = p.debug
    offset_x = -.3095  # TODO: FIXME hard-coded
    offset_y = -.1005
    self.cd_bw = calib.PatternDetector('Dot Detector, B/W',
                                       rows=p.rows, cols=p.cols,
                                       pattern_type=p.pattern_type,
                                       square_size=p.square_size,
                                       offset_x=offset_x,
                                       offset_y=offset_y)
    offset_x = .1505  # TODO: FIXME hard-coded
    self.cd_wb = calib.PatternDetector('Dot Detector, W/B',
                                       rows=p.rows, cols=p.cols,
                                       pattern_type=p.pattern_type,
                                       square_size=p.square_size,
                                       offset_x=offset_x,
                                       offset_y=offset_y)
    self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
    self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=p.rows, cols=p.cols)
    self.circle_drawer2 = calib.PatternDrawer('Circle Draw', rows=p.rows, cols=p.cols)
    self.pose_draw = calib.PoseDrawer('Pose Draw')
    self.fps = highgui.FPSDrawer()
    # inputs
    i.declare('image', self.gray_image.inputs.at('in'))
    i.declare('color_image', self.rgb_image.inputs.at('in'))
    i.declare('K', self.camera_info.inputs.at('in'))
    # outputs
    o.declare('R', self.pose_calc.outputs.at('R'))
    o.declare('T', self.pose_calc.outputs.at('T'))
    o.declare('found', self.gather.outputs.at('found'))
    o.declare('debug_image', self.fps.outputs.at('image'))

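# Companion sketch for the declare_io above: for p.rows, p.cols, etc. to be
# available, the BlackBox has to declare them as parameters first. A minimal
# declare_params under that assumption; the defaults are illustrative values
# taken from other fragments in this file, not the author's:
def declare_params(self, p):
    p.declare('rows', 'Number of dot rows in the pattern.', 5)
    p.declare('cols', 'Number of dot columns in the pattern.', 3)
    p.declare('pattern_type', 'The type of calibration pattern to detect.', 'acircles')
    p.declare('square_size', 'The dot spacing, in meters.', 0.04)
    p.declare('debug', 'Enable debug drawing and FPS display.', True)
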
def __init__(self, plasm):
    ecto.BlackBox.__init__(self, plasm)
    self.video_cap = highgui.VideoCapture(video_device=0)
    self.fps = highgui.FPSDrawer()
    self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
    self.circle_detector = calib.PatternDetector('Dot Detector', rows=7, cols=3,
                                                 pattern_type="acircles",
                                                 square_size=0.03)
    self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=7, cols=3)
    self.circle_display = highgui.imshow('Pattern show', name='Pattern',
                                         waitKey=2, maximize=True)
    self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
    self.pose_draw = calib.PoseDrawer('Pose Draw')
    self.camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

def __init__(self, plasm, rows, cols, pattern_type, square_size, debug=True):
    ecto.BlackBox.__init__(self, plasm)
    self.gray_image = ecto.Passthrough('gray Input')
    self.rgb_image = ecto.Passthrough('rgb Input')
    self.camera_info = ecto.Passthrough('K')
    self.gather = calib.GatherPoints("gather", N=2)
    self.invert = imgproc.BitwiseNot()
    offset_x = -.310
    offset_y = -.101099
    self.cd_bw = calib.PatternDetector('Dot Detector, B/W',
                                       rows=rows, cols=cols,
                                       pattern_type=pattern_type,
                                       square_size=square_size,
                                       offset_x=offset_x,
                                       offset_y=offset_y)
    offset_x = .150
    self.cd_wb = calib.PatternDetector('Dot Detector, W/B',
                                       rows=rows, cols=cols,
                                       pattern_type=pattern_type,
                                       square_size=square_size,
                                       offset_x=offset_x,
                                       offset_y=offset_y)
    self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
    self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=rows, cols=cols)
    self.pose_draw = calib.PoseDrawer('Pose Draw')

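# Hedged wiring sketch for the opposing-contrast trick above (the port names
# on BitwiseNot and the detectors are assumptions): the B/W detector reads the
# gray image directly while the W/B detector reads it through BitwiseNot, so
# one detector implementation finds both dot polarities.
plasm.connect(self.gray_image['out'] >> (self.cd_bw['input'], self.invert['input']),
              self.invert['out'] >> self.cd_wb['input'])
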
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
circle_detector = calib.PatternDetector(rows=7, cols=3,
                                        pattern_type="acircles", square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show', name='Pattern', waitKey=2, maximize=True)
pose_calc = calib.FiducialPoseFinder('Pose Calc')
pose_draw = calib.PoseDrawer('Pose Draw')
camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

plasm.connect(video_cap['image'] >> circle_drawer['input'],
              circle_drawer['out'] >> pose_draw['image'],
              pose_draw['output'] >> fps['image'],
              fps['image'] >> circle_display['input'],
              video_cap['image'] >> rgb2gray['input'],
              rgb2gray['out'] >> circle_detector['input'],
              circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
              camera_info['K'] >> (pose_calc['K'], pose_draw['K']),
              circle_detector['out', 'ideal', 'found'] >> pose_calc['points', 'ideal', 'found'],
              pose_calc['R', 'T'] >> pose_draw['R', 'T'])

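# Usage sketch for the fragment above (assumes the 'video_cap' and 'fps'
# cells it references exist, e.g. highgui.VideoCapture and highgui.FPSDrawer
# as in the other fragments): drive the plasm with ecto's single-threaded
# scheduler until interrupted.
import ecto

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()
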
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False, preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot
    pattern to sparsify views.
    @param bag_name: A filename for the bag; data will be written to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []
    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource',
                           outputs_list=['K_depth', 'K_image', 'camera', 'image',
                                         'depth', 'points3d', 'mask_depth'],
                           res=res, fps=fps)
    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:]]
    # find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # convert K if the resolution is different (the camera should output that)
    graph += [
        # find the normals
        source['K_depth', 'points3d'] >> compute_normals['K', 'points3d'],
        # find the planes
        compute_normals['normals'] >> plane_est['normals'],
        source['K_depth', 'points3d'] >> plane_est['K', 'points3d'],
    ]
    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [
            source['image', 'K_image', 'mask_depth', 'points3d'] >>
                poser['color_image', 'K_image', 'mask', 'points3d'],
            rgb2gray[:] >> poser['image'],
        ]
    else:
        # get a pose using the dot pattern: there might be a scale ambiguity,
        # as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                         pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                         square_size=0.04, debug=True)
        graph += [
            source['image', 'K_image'] >> poser['color_image', 'K_image'],
            rgb2gray[:] >> poser['image'],
        ]
    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()
    # make sure the pose is centered at the origin of the plane
    graph += [
        source['K_depth'] >> pose_filter['K_depth'],
        poser['R', 'T'] >> pose_filter['R', 'T'],
        plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'],
    ]
    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [
        pose_filter['found'] >> pose_drawer['trigger'],
        poser['debug_image'] >> pose_drawer['image'],
        source['K_image'] >> pose_drawer['K'],
        pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
        pose_drawer['output'] >> display[:],
    ]
    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                                 n_desired=n_desired))
    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')
    graph += [
        pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
        pose_filter['R', 'T'] >> poseMsg['R', 'T'],
    ]
    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [
        source['depth'] >> depthMsg[:],
        source['image'] >> rgbMsg[:],
    ]
    # mask out the object
    masker = segmentation_cell
    graph += [
        source['points3d'] >> masker['points3d'],
        plane_est['masks', 'planes'] >> masker['masks', 'planes'],
        pose_filter['T'] >> masker['T'],
    ]
    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [masker['mask'] >> maskMsg[:]]
    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [
        source['camera'] >> camera2cv['camera'],
        camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size'],
    ]
    # display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
        masker['mask'] >> mask_display['mask'],
        source['image'] >> mask_display['image'],
        mask_display['image'] >> mask_display_highgui['image'],
    ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'))
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
        graph += [
            rgbMsg[:] >> bagwriter['image'],
            depthMsg[:] >> bagwriter['depth'],
            cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
            poseMsg['pose'] >> bagwriter['pose'],
            maskMsg[:] >> bagwriter['mask'],
        ]
    novel = delta_pose['novel']
    if use_turn_table:
        table = TurnTable(angle_thresh=angle_thresh)
        ander = ecto.And()
        graph += [
            table['trigger'] >> (delta_pose['__test__'], ander['in2']),
            delta_pose['novel'] >> ander['in1'],
        ]
        novel = ander['out']
    else:
        delta_pose.inputs.__test__ = True
    if not preview:  # the bagwriter only exists when actually recording
        graph += [novel >> bagwriter['__test__']]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell)  # return segmentation for tuning of parameters

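# Minimal driver sketch for create_capture_plasm above (hedged: 'segmenter_cell'
# is a placeholder for whatever segmentation cell fits the scene, and the
# ~10 degree angle threshold is only an example value):
import math
import ecto

plasm, segmenter = create_capture_plasm(bag_name='capture.bag',
                                        angle_thresh=math.pi / 18,
                                        segmentation_cell=segmenter_cell)
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()
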
import ecto
from ecto_opencv import highgui, calib, imgproc

debug = True

plasm = ecto.Plasm()
rows = 7
cols = 3
square_size = 0.03  # in meters

pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
rgb2gray = imgproc.cvtColor(flag=7)
video = highgui.VideoCapture(video_device=0)
circle_detector = calib.PatternDetector(rows=rows, cols=cols,
                                        pattern_type="acircles",
                                        square_size=square_size)
circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
poser = calib.FiducialPoseFinder()
pose_drawer = calib.PoseDrawer()
camera_intrinsics = calib.CameraIntrinsics(camera_file="camera.yml")

plasm.connect(video, "image", rgb2gray, "input")
plasm.connect(rgb2gray, "out", circle_detector, "input")
plasm.connect(video, "image", circle_drawer, "input")
plasm.connect(circle_detector, "out", circle_drawer, "points")
plasm.connect(circle_detector, "found", circle_drawer, "found")
plasm.connect(camera_intrinsics, "K", poser, "K")
plasm.connect(circle_detector, "out", poser, "points")
plasm.connect(circle_detector, "ideal", poser, "ideal")
plasm.connect(circle_detector, "found", poser, "found")
plasm.connect(poser, "R", pose_drawer, "R")
plasm.connect(poser, "T", pose_drawer, "T")
plasm.connect(circle_drawer, "out", pose_drawer, "image")
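
# The script above creates 'pattern_show' but never feeds it; a hedged
# completion (PoseDrawer's drawn image is exposed as 'output' in the other
# fragments), then run the graph with ecto's single-threaded scheduler:
plasm.connect(pose_drawer, "output", pattern_show, "input")

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()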