Code Example #1
def calibration(rows, cols, square_size, pattern_type, n_obs, video):
    plasm = ecto.Plasm()
    pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
    rgb2gray = imgproc.cvtColor(flag=7)  # flag 7 is RGB2GRAY (cf. imgproc.RGB2GRAY)
    circle_detector = calib.PatternDetector(rows=rows,
                                            cols=cols,
                                            pattern_type=pattern_type,
                                            square_size=square_size)
    ecto.print_module_doc(circle_detector)
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml",
                                               n_obs=n_obs)

    plasm.connect(video, "image", rgb2gray, "input")
    plasm.connect(rgb2gray, "out", circle_detector, "input")
    plasm.connect(video, "image", circle_drawer, "input")
    plasm.connect(video, "image", camera_calibrator, "image")
    plasm.connect(circle_detector, "out", circle_drawer, "points")
    plasm.connect(circle_detector, "found", circle_drawer, "found")
    plasm.connect(circle_drawer, "out", pattern_show, "input")
    plasm.connect(circle_detector, "ideal", camera_calibrator, "ideal")
    plasm.connect(circle_detector, "out", camera_calibrator, "points")
    plasm.connect(circle_detector, "found", camera_calibrator, "found")

    print plasm.viz()
    ecto.view_plasm(plasm)

    # Step the graph until ESC (27) is pressed in the window or calibration finishes.
    while (pattern_show.outputs.out != 27
           and not camera_calibrator.outputs.calibrated):
        plasm.execute(1)
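For context, a minimal driver for this calibration function might look like the sketch below. It assumes ecto_opencv is importable, mirrors the 7x3 "acircles" pattern parameters used in Code Example #8, and the n_obs value is an arbitrary placeholder.

from ecto_opencv import highgui

# The VideoCapture cell provides the "image" output that calibration() wires
# into the graph; every other cell is constructed inside the function, and the
# function drives its own execute loop.
video = highgui.VideoCapture(video_device=0)
calibration(rows=7, cols=3, square_size=0.03,
            pattern_type="acircles", n_obs=50, video=video)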
Code Example #2
File: calibrator.py Project: straszheim/ecto_opencv
def calibration(rows, cols, square_size, pattern_type, n_obs, video):
    plasm = ecto.Plasm()
    pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
    rgb2gray = imgproc.cvtColor(flag=7)
    circle_detector = calib.PatternDetector(rows=rows, cols=cols,
                                            pattern_type=pattern_type,
                                            square_size=square_size)
    ecto.print_module_doc(circle_detector)
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml",
                                               n_obs=n_obs)
    
    plasm.connect(video, "image", rgb2gray, "input")
    plasm.connect(rgb2gray, "out", circle_detector, "input")
    plasm.connect(video, "image", circle_drawer, "input")
    plasm.connect(video, "image", camera_calibrator, "image")
    plasm.connect(circle_detector, "out", circle_drawer, "points")
    plasm.connect(circle_detector, "found", circle_drawer, "found")
    plasm.connect(circle_drawer, "out", pattern_show, "input")
    plasm.connect(circle_detector, "ideal", camera_calibrator, "ideal")
    plasm.connect(circle_detector, "out", camera_calibrator, "points")
    plasm.connect(circle_detector, "found", camera_calibrator, "found")
    
    print plasm.viz()
    ecto.view_plasm(plasm)
    
    while (pattern_show.outputs.out != 27
           and not camera_calibrator.outputs.calibrated):
        plasm.execute(1)
Code Example #3
File: rounder.py Project: ethanrublee/ecto_talk
    def __init__(self, plasm, rows, cols, pattern_type, square_size, debug=True):
        ecto.BlackBox.__init__(self, plasm)
        if debug:
            print self.__class__, "enabling debug nodes"
        self.video_cap = ecto.Passthrough('Image Input')
        self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.RGB2GRAY)
        self.circle_detector = calib.PatternDetector('Dot Detector',
                                                     rows=rows, cols=cols,
                                                     pattern_type=pattern_type,
                                                     square_size=square_size)
        self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
        self.camera_info = calib.CameraIntrinsics('Camera Info',
                                                  camera_file="camera.yml")
        self.trans = imgproc.Translate("trans", x=0.04 * 3)

        self.debug = debug
        if self.debug:
            self.fps = highgui.FPSDrawer()
            self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                                     rows=rows, cols=cols)
            self.pose_draw = calib.PoseDrawer('Pose Draw')

            self.circle_conv = ecto_ros.Mat2Image('Fiducial Pose Converter')
            self.circle_pub = ecto_sensor_msgs.Publisher_Image('Fiducial Image w/ Pose Publisher', topic_name='/ecto/circles')

            self.foundprinter = ecto_test.Printer("printy", print_type='bool')
Code Example #4
File: calibrator.py Project: ethanrublee/ecto_talk
def make_calib(images, plasm, calibfname):
    rgb2gray = imgproc.cvtColor(flag=imgproc.RGB2GRAY)
    plasm.connect(images["image"] >> rgb2gray[:])

    detect = calib.PatternDetector(rows=5, cols=3,
                                   pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                   square_size=0.04)
    plasm.connect(rgb2gray["out"] >> detect["input"])

    draw = calib.PatternDrawer(rows=5, cols=3)
    plasm.connect(detect["out"] >> draw["points"],
                  detect["found"] >> draw["found"],
                  images["image"] >> draw["input"])

    fps = highgui.FPSDrawer()
    plasm.connect(draw[:] >> fps[:])

    calibcell = calib.CameraCalibrator(output_file_name=calibfname + '.yml',
                                       n_obs=75,
                                       quit_when_calibrated=False)

    plasm.connect(detect["ideal"] >> calibcell["ideal"],
                  detect["out"] >> calibcell["points"],
                  detect["found"] >> calibcell["found"],
                  images["image"] >> calibcell["image"])
    pattern_show = highgui.imshow(name=calibfname, waitKey=10)
    plasm.connect(fps[:] >> pattern_show["input"])
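A hedged usage sketch for make_calib: it only needs a cell with an "image" output plus a plasm to populate, so a webcam source and the single-threaded scheduler used elsewhere in these examples should suffice. The device index and output name are placeholders.

import ecto
from ecto_opencv import highgui

plasm = ecto.Plasm()
video_cap = highgui.VideoCapture(video_device=0)
# Collects 75 observations and writes the intrinsics to 'camera.yml'.
make_calib(video_cap, plasm, 'camera')
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()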
Code Example #5
 def __init__(self,
              plasm,
              rows,
              cols,
              pattern_type,
              square_size,
              debug=True):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = ecto.Passthrough('Image Input')
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                                  rows=rows,
                                                  cols=cols,
                                                  pattern_type=pattern_type,
                                                  square_size=square_size)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.camera_info = calib.CameraIntrinsics('Camera Info',
                                               camera_file="camera.yml")
     self.debug = debug
     if self.debug:
         self.fps = highgui.FPSDrawer()
         self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                                  rows=rows,
                                                  cols=cols)
         self.circle_display = highgui.imshow('Pattern show',
                                              name='Pattern',
                                              waitKey=2,
                                              autoSize=True)
         self.pose_draw = calib.PoseDrawer('Pose Draw')
Code Example #6
def connect_observation_calc(sync, commit, object_id, session_id, debug=False):
    plasm = ecto.Plasm()
    depth_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
    image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')

    image = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()
    #conversions
    plasm.connect(
                  sync["image"] >> image["image"],
                  sync["depth"] >> depth['image'],
                  sync['image_ci'] >> image_ci[:],
                  sync['depth_ci'] >> depth_ci[:]
                  )

    rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
    gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)

    calc_observations = CalcObservations(plasm)
    rescale_depth = capture.RescaledRegisteredDepth() #this is for SXGA mode scale handling.
    plasm.connect(image[:] >> (rgb[:], gray[:]),
                  gray[:] >> (calc_observations['image'], rescale_depth['image']),
                  depth[:] >> rescale_depth['depth'],
                  rescale_depth['depth'] >> calc_observations['depth'],
                  image_ci['K'] >> calc_observations['K'],
                  )
    if debug:
        image_display = highgui.imshow('image display', name='image', waitKey=10, autoSize=True)
        mask_display = highgui.imshow('mask display', name='mask', waitKey= -1, autoSize=True)
        plasm.connect(rgb[:] >> image_display[:])
        plasm.connect(calc_observations['mask'] >> mask_display[:])


    if commit:
        db_inserter = capture.ObservationInserter("db_inserter", object_id=object_id, session_id=session_id)
        plasm.connect(depth[:] >> db_inserter['depth'],
                  calc_observations['R', 'T', 'mask', 'novel'] >> db_inserter['R', 'T', 'mask', 'found'],
                  rgb[:] >> db_inserter['image'],
                  image_ci['K'] >> db_inserter['K'],
                  )
    return plasm
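A hedged sketch of how this function might be driven: the sync cell is built with ecto_ros.Synchronizer, with the subscriber aliases and topic names copied from Code Example #9; the object and session ids are placeholders, so treat this as an assumption rather than the original caller.

import ecto
import ecto_ros

subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
            depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
            image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
            depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
            )
sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
plasm = connect_observation_calc(sync, commit=False,
                                 object_id='placeholder_object_id',
                                 session_id='placeholder_session_id',
                                 debug=True)
ecto.schedulers.Singlethreaded(plasm).execute()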
Code Example #7
File: detector.py Project: vrabaud/tod
    def connections(self, p):
        # Rescale the depth image and convert to 3d
        graph = [ self.passthrough['image'] >> self._depth_map['image'],
                  self._depth_map['depth'] >> self._points3d['depth'],
                  self.passthrough['K'] >> self._points3d['K'],
                  self._points3d['points3d'] >> self.guess_generator['points3d'] ]
        # make sure the inputs reach the right cells
        if 'depth' in self.feature_descriptor.inputs.keys():
            graph += [ self._depth_map['depth'] >> self.feature_descriptor['depth']]

        graph += [ self.passthrough['image'] >> self.feature_descriptor['image'],
                   self.passthrough['image'] >> self.guess_generator['image'] ]

        graph += [ self.descriptor_matcher['spans'] >> self.guess_generator['spans'],
                   self.descriptor_matcher['object_ids'] >> self.guess_generator['object_ids'] ]

        graph += [ self.feature_descriptor['keypoints'] >> self.guess_generator['keypoints'],
                   self.feature_descriptor['descriptors'] >> self.descriptor_matcher['descriptors'],
                   self.descriptor_matcher['matches', 'matches_3d'] >> self.guess_generator['matches', 'matches_3d'] ]

        cvt_color = imgproc.cvtColor(flag=imgproc.RGB2GRAY)

        if p.visualize or ECTO_ROS_FOUND:
            draw_keypoints = features2d.DrawKeypoints()
            graph += [ self.passthrough['image'] >> cvt_color[:],
                           cvt_color[:] >> draw_keypoints['image'],
                           self.feature_descriptor['keypoints'] >> draw_keypoints['keypoints']
                           ]

        if p.visualize:
            # visualize the found keypoints
            image_view = highgui.imshow(name="RGB")
            keypoints_view = highgui.imshow(name="Keypoints")

            graph += [ self.passthrough['image'] >> image_view['image'],
                       draw_keypoints['image'] >> keypoints_view['image']
                           ]

            pose_view = highgui.imshow(name="Pose")
            pose_drawer = calib.PosesDrawer()

            # draw the poses
            graph += [ self.passthrough['image', 'K'] >> pose_drawer['image', 'K'],
                       self.guess_generator['Rs', 'Ts'] >> pose_drawer['Rs', 'Ts'],
                       pose_drawer['output'] >> pose_view['image'] ]

        if ECTO_ROS_FOUND:
            ImagePub = ecto_sensor_msgs.Publisher_Image
            pub_features = ImagePub("Features Pub", topic_name='features')
            graph += [ draw_keypoints['image'] >> self.message_cvt[:],
                       self.message_cvt[:] >> pub_features[:] ]

        return graph
Code Example #8
File: example_07.py Project: straszheim/ecto_opencv
 def __init__(self, plasm):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = highgui.VideoCapture(video_device=0)
     self.fps = highgui.FPSDrawer()
     self.rgb2gray = imgproc.cvtColor("rgb -> gray", flag=7)
     self.circle_detector = calib.PatternDetector(
         "Dot Detector", rows=7, cols=3, pattern_type="acircles", square_size=0.03
     )
     self.circle_drawer = calib.PatternDrawer("Circle Draw", rows=7, cols=3)
     self.circle_display = highgui.imshow("Pattern show", name="Pattern", waitKey=2, maximize=True)
     self.pose_calc = calib.FiducialPoseFinder("Pose Calc")
     self.pose_draw = calib.PoseDrawer("Pose Draw")
     self.camera_info = calib.CameraIntrinsics("Camera Info", camera_file="camera.yml")
Code Example #9
def create_capture_plasm(bag_name):
    '''
    Creates a plasm that will capture openni data into a bag.
    '''
    # bag writing
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                   depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                   )
    #conditional writer
    bagwriter = ecto.If('Bag Writer if \'s\'',
                        cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                        )

    subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
                image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
                cloud=PointCloudSub(topic_name='/camera/depth_registered/points', queue_size=0),
                depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
                )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs
                                 )

    graph = [
                sync['image','depth','image_ci','depth_ci'] >> bagwriter['image','depth','image_ci','depth_ci'],
            ]

    #point cloud stuff
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
    example = easy_capture.ExampleFilter()
    graph += [
        sync['cloud'] >> msg2cloud[:],
        msg2cloud[:] >> example[:],
    ]

    #use opencv highgui to display an image.
    im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
    display = highgui.imshow('Image Display', name='/camera/rgb/image_color', waitKey=5, autoSize=True, triggers=dict(save=ord('s')))
    bgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.Conversion.RGB2BGR)
    graph += [
        sync['image'] >> im2mat_rgb[:],
        im2mat_rgb[:] >> bgr2rgb[:],
        bgr2rgb[:] >> display[:],
        display['save'] >> bagwriter['__test__']
    ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return plasm
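Because the plasm subscribes to ROS topics, the node must be initialized before execution, as done in Code Examples #16 and #17. The node and bag names below are placeholders (a sketch, not the original driver).

import sys
import ecto
import ecto_ros

ecto_ros.init(sys.argv, "openni_capture", False)  # placeholder node name
plasm = create_capture_plasm('capture.bag')       # placeholder bag file
ecto.schedulers.Singlethreaded(plasm).execute()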
Code Example #10
    def connect_background_source(self):
        json_db_str = '{"type": "CouchDB", "root": "http://localhost:5984", "collection": "object_recognition"}'
        self.json_db = ecto.Constant(value=json_db_str)

        object_id_str = '4680aac58c1d263b9449d57bd2000f27'
        self.object_id = ecto.Constant(value=object_id_str)

        self.image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
        self.image = ecto_ros.Image2Mat()
        self.depth = ecto_ros.Image2Mat()

        bag = "/home/sam/rosbags/sigverse/no_objects.bag"

        ImageBagger = ecto_sensor_msgs.Bagger_Image
        CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo

        baggers = dict(
            image=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
            image_ci=CameraInfoBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
            depth=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/depth/image_raw'),
        )

        # this will read all images in the path
        path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
        self.file_source = ImageReader(path=os.path.expanduser(path))

        self.bag_reader = ecto_ros.BagReader('Bag Reader',
                                             baggers=baggers,
                                             bag=bag,
                                             random_access=True,
                                             )

        self.rgb = imgproc.cvtColor(
            'bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
        self.display = highgui.imshow(name='Training Data', waitKey=10000)
        self.image_mux = ecto_yolo.ImageMux()

        self.graph += [
            self.bag_reader['image'] >> self.image['image'],
            self.bag_reader['depth'] >> self.depth['image'],
            self.image[:] >> self.rgb[:],
            self.bag_reader['image_ci'] >> self.image_ci['camera_info'],
            self.image_ci['K'] >> self.image_mux['K1'],
            self.rgb['image'] >> self.image_mux['image1'],
            self.depth['image'] >> self.image_mux['depth1'],
            self.file_source['image'] >> self.image_mux['image2'],
        ]
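The snippet stops after extending self.graph. Following the pattern of Code Example #9, the accumulated graph would eventually be connected into a plasm and run; a hedged sketch of that final step:

        # Later, in the same class (an assumption): finalize and run the graph.
        plasm = ecto.Plasm()
        plasm.connect(self.graph)
        ecto.schedulers.Singlethreaded(plasm).execute()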
Code Example #11
 def __init__(self, plasm):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = highgui.VideoCapture(video_device=0)
     self.fps = highgui.FPSDrawer()
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                             rows=7, cols=3, pattern_type="acircles",
                                             square_size=0.03)
     self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                         rows=7, cols=3)
     self.circle_display = highgui.imshow('Pattern show',
                                     name='Pattern', waitKey=2, maximize=True)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.pose_draw = calib.PoseDrawer('Pose Draw')
     self.camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")
Code Example #12
 def configure(self, p, _i, _o):
     if p.sensor == 'kinect2':
         self.cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/kinect2_head/depth_lowres/points') 
     else:
         self.cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
     self.msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
     self.cut_x = PassThrough(filter_field_name="x", 
                     filter_limit_min=-0.5,
                     filter_limit_max=0.5)
     self.cut_y = PassThrough(filter_field_name="y", 
                     filter_limit_min=-0.5,
                     filter_limit_max=0.5)
     self.cut_z = PassThrough(filter_field_name="z",    
                     filter_limit_min=0.5,
                     filter_limit_max=1.3)
     self.voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.001)
     self.viewer = CloudViewer("viewer", window_name="PCD Viewer")
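This configure() only constructs cells; the matching connections() body is not shown. A plausible wiring, mirroring the crop/voxel chain of Code Example #16, would be the following (an assumption; the signature mirrors the configure() above):

 def connections(self, p, _i, _o):
     # Subscriber -> cloud conversion -> x/y/z crop -> voxel grid -> viewer.
     return [self.cloud_sub['output'] >> self.msg2cloud[:],
             self.msg2cloud[:] >> self.cut_x[:],
             self.cut_x[:] >> self.cut_y[:],
             self.cut_y[:] >> self.cut_z[:],
             self.cut_z[:] >> self.voxel_grid[:],
             self.voxel_grid[:] >> self.viewer[:]]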
Code Example #13
 def __init__(self, plasm, rows, cols, pattern_type, square_size, debug=True):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = ecto.Passthrough('Image Input')
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                             rows=rows, cols=cols, 
                                             pattern_type=pattern_type,
                                             square_size=square_size)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.camera_info = calib.CameraIntrinsics('Camera Info', 
                                               camera_file="camera.yml")
     self.debug = debug
     if self.debug:
         self.fps = highgui.FPSDrawer()
         self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                                  rows=rows, cols=cols)
         self.circle_display = highgui.imshow('Pattern show',
                                              name='Pattern', 
                                              waitKey=2, autoSize=True)
         self.pose_draw = calib.PoseDrawer('Pose Draw')
Code Example #14
    def connections(self):
        # make sure the inputs reach the right cells
        connections = [self.passthrough['image'] >> self.feature_descriptor['image'],
                       self.passthrough['image'] >> self.guess_generator['image'], ]

        connections += [ self.descriptor_matcher['spans'] >> self.guess_generator['spans'],
                       self.descriptor_matcher['object_ids'] >> self.guess_generator['object_ids'] ]

        connections += [ self.feature_descriptor['keypoints'] >> self.guess_generator['keypoints'],
                self.feature_descriptor['descriptors'] >> self.descriptor_matcher['descriptors'],
                self.descriptor_matcher['matches', 'matches_3d'] >> self.guess_generator['matches', 'matches_3d'] ]

        pub_features = ImagePub("Features Pub", topic_name='features')
        cvt_color = imgproc.cvtColor(flag=imgproc.RGB2GRAY)

        draw_keypoints = features2d.DrawKeypoints()
        connections += [ self.passthrough['image'] >> cvt_color[:],
                           cvt_color[:] >> draw_keypoints['image'],
                           self.feature_descriptor['keypoints'] >> draw_keypoints['keypoints'],
                           draw_keypoints['image'] >> self.message_cvt[:],
                           self.message_cvt[:] >> pub_features[:] ]

        if self._visualize:
            # visualize the found keypoints
            image_view = highgui.imshow(name="RGB")
            keypoints_view = highgui.imshow(name="Keypoints")

            connections += [ self.passthrough['image'] >> image_view['image'],
                           draw_keypoints['image'] >> keypoints_view['image']
                           ]

            pose_view = highgui.imshow(name="Pose")
            pose_drawer = calib.PosesDrawer()

            # draw the poses
            connections += [ self.passthrough['image', 'K'] >> pose_drawer['image', 'K'],
                              self.guess_generator['Rs', 'Ts'] >> pose_drawer['Rs', 'Ts'],
                              pose_drawer['output'] >> pose_view['image'] ]

        return connections
Code Example #15
#!/usr/bin/env python
# add pose estimation
import ecto
from ecto_opencv import highgui, calib, imgproc

plasm = ecto.Plasm()
sched = ecto.schedulers.Singlethreaded(plasm)

video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()

rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)

circle_detector = calib.PatternDetector(rows=7,
                                        cols=3,
                                        pattern_type="acircles",
                                        square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show',
                                name='Pattern',
                                waitKey=2,
                                maximize=True)

pose_calc = calib.FiducialPoseFinder('Pose Calc')

pose_draw = calib.PoseDrawer('Pose Draw')

camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

plasm.connect(
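    # NOTE: the listing is cut off inside this call. A plausible completion,
    # following the near-identical wiring of Code Example #27 (an assumption):
    video_cap['image'] >> rgb2gray['input'],
    rgb2gray['out'] >> circle_detector['input'],
    circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
    video_cap['image'] >> circle_drawer['input'],
    camera_info['K'] >> (pose_calc['K'], pose_draw['K']),
    circle_detector['out', 'ideal', 'found'] >> pose_calc['points', 'ideal', 'found'],
    pose_calc['R', 'T'] >> pose_draw['R', 'T'],
    circle_drawer['out'] >> pose_draw['image'],
    pose_draw['output'] >> fps['image'],
    fps['image'] >> circle_display['image'],
)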
Code Example #16
File: scanner_plasm.py Project: rudy-wang/vision
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                                            sensor, res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.
    
    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.  
    '''
    graph = []
    
#   Create ros node, subscribing to openni topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    # openni & freenect require '/camera/rgb/image_rect_color'; openni2 just needs '/camera/rgb/image_raw'
    if sensor in ['kinect']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_color')
    elif sensor in ['xtion']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
#   Convert ROS topics to cv types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
    """            
    rgb_display = highgui.imshow(name='rgb')
    depth_display = highgui.imshow(name='depth')
    graph += [image2cv[:] >> rgb_display[:],
              depth2cv[:] >> depth_display[:],
            ]
            
    #   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ msg2cloud[:] >> viewer[:] ]
    """   
#   Process pointcloud

    # Cut off some parts
    
    cut_x = PassThrough(filter_field_name="x", 
                        filter_limit_min=-0.2,
                        filter_limit_max=0.2)
                        
    cut_y = PassThrough(filter_field_name="y", 
                        filter_limit_min=-0.5,
                        filter_limit_max=0.5)
    cut_z = PassThrough(filter_field_name="z",    
                        filter_limit_min=-0.0,
                        filter_limit_max=1.0)
                        
    graph += [ msg2cloud[:] >> cut_x[:],
               cut_x[:] >> cut_y[:],
               cut_y[:] >> cut_z[:] ]
               
    # Voxel filter
    
    voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.002)
    
    graph += [ cut_z[:] >> voxel_grid[:] ]
               
    # Find plane and delete it
    
    normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
    
    graph += [ voxel_grid[:] >> normals[:] ]
    
    plane_finder = SACSegmentationFromNormals("planar_segmentation",
                                                 model_type=SACMODEL_NORMAL_PLANE,
                                                 eps_angle=0.09, distance_threshold=0.1)
    
    plane_deleter = ExtractIndices(negative=True)
    
    graph += [ voxel_grid[:] >> plane_finder['input'],
               normals[:] >> plane_finder['normals'] ]
    graph += [ voxel_grid[:]  >> plane_deleter['input'],
               plane_finder['inliers'] >> plane_deleter['indices'] ]
#   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ plane_deleter[:] >> viewer[:] ]
    
#   Compute vfh
    
    vfh_signature = VFHEstimation(radius_search=0.01)
    
    graph += [ plane_deleter[:] >>  vfh_signature['input'],
               normals[:] >> vfh_signature['normals'] ]                                                          
    
    # convert the image to grayscale                                                                      
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [image2cv[:]  >> rgb2gray[:] ]
                
    # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only           
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               rgb2gray[:] >> poser['image'] ]         #poser gives us R&T from camera to dot pattern
    
    
    points3d = calib.DepthTo3d()
    
    graph += [ depth_info2cv['K'] >> points3d['K'],
               depth2cv['image'] >> points3d['depth']
             ]
    
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    
    graph += [ # find the normals
                depth_info2cv['K'] >> compute_normals['K'],
                points3d['points3d'] >> compute_normals['points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                depth_info2cv['K'] >> plane_est['K'],
                points3d['points3d'] >> plane_est['points3d'] ]
    

    
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ depth_info2cv['K'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]
               
    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))
                                                          
    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]
    
    trainer_ = Trainer()
    
    graph += [ pose_filter['R', 'T'] >> trainer_['R', 'T'],
               vfh_signature['output'] >> trainer_['features'],
               delta_pose['last'] >> trainer_['last'],
               delta_pose['novel'] >> trainer_['novel'],
               plane_deleter[:] >> trainer_['input'] ]
 
    novel = delta_pose['novel']

    cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

    graph += [ plane_deleter[:] >> cloud2msg[:] ]   
    
    baggers = dict(points=PointCloudBagger(topic_name='/camera/depth_registered/points'),
                   pose=PoseBagger(topic_name='/camera/pose')
                   )
                   
    bagwriter = ecto.If('Bag Writer if R|T', cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
    pcd_writer = ecto.If('msd pcd writer', cell=PCDWriter())
    
    graph += [    cloud2msg[:] >> bagwriter['points'],
                  poseMsg['pose'] >> bagwriter['pose']
                  ]
    graph += [ plane_deleter['output'] >> pcd_writer['input'] ]
    
    delta_pose.inputs.__test__ = True
    pcd_writer.inputs.__test__ = True
    
    graph += [novel >> (bagwriter['__test__'])]  
    graph += [novel >> (pcd_writer['__test__'])]
    
    rt2pose = RT2PoseStamped(frame_id = "camera_rgb_optical_frame")
    rt2pose_filtered = RT2PoseStamped(frame_id = "camera_rgb_optical_frame")      
    posePub = PosePublisher("pose_pub", topic_name='pattern_pose')
    posePub_filtered = PosePublisher("pose_pub_filtered", topic_name='pattern_pose_filtered')
    graph += [ poser['R', 'T'] >> rt2pose['R', 'T'],
               rt2pose['pose'] >> posePub['input'],
               pose_filter['R', 'T'] >> rt2pose_filtered['R', 'T'],
               rt2pose_filtered['pose'] >> posePub_filtered['input'] ]
   
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
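A hedged driver sketch for this capture function; the bag name, angle threshold, and view count are placeholders, and segmentation_cell comes from the caller (the function returns it for parameter tuning). Note that ecto_ros.init() is already called inside the function, so no extra node setup is needed.

import ecto

plasm, seg = create_capture_plasm('scan.bag', angle_thresh=0.2,
                                  segmentation_cell=segmentation_cell,
                                  n_desired=36, sensor='xtion')
ecto.schedulers.Singlethreaded(plasm).execute()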
Code Example #17
    bag_name = sys.argv[1]
    node_name = "labeled_example_writer"
    ecto_ros.init(sys.argv, node_name, False)
    
    #subscriptions
    subs = dict(image=ecto_sensor_msgs.Subscriber_Image(topic_name='/camera/rgb/image_color', queue_size=1),
            info=ecto_sensor_msgs.Subscriber_CameraInfo(topic_name='/camera/rgb/camera_info', queue_size=1),
            cloud=ecto_sensor_msgs.Subscriber_PointCloud2(topic_name='/camera/rgb/points', queue_size=1),
            )
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
    sub_keys = subs.keys()
    
    #conversions
    img2mat = ecto_ros.Image2Mat("Image to Mat", swap_rgb=True)
    rgb2gray = imgproc.cvtColor('RGB -> Gray', flag=imgproc.Conversion.RGB2GRAY)
    info2cv = ecto_ros.CameraInfo2Cv("Info to CV")
    to_pose_stamped = ecto_ros.RT2PoseStamped("To Pose Stamped", frame_id="openni_rgb_optical_frame")
    
    #pose estimation
    rows = 5; cols = 3; square_size = 0.04; pattern_type = calib.ASYMMETRIC_CIRCLES_GRID
    circle_detector = calib.PatternDetector(rows=rows, cols=cols,
                                        pattern_type=pattern_type,
                                        square_size=square_size)
    poser = calib.FiducialPoseFinder()
    offset = ecto_corrector.TranslationOffset("Offset", tx=-0.079, ty=0.120, tz=0.000)

    #visualization
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    pose_drawer = calib.PoseDrawer()
    show_triggers = {'c_key':ord('c')}
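The snippet ends before any connections are made. One plausible wiring of these cells, modeled on the detector/pose-finder graphs elsewhere in this listing (an assumption, not the original file):

    # Convert the synchronized image, detect the pattern, and estimate a pose.
    graph = [sync['image'] >> img2mat['image'],
             img2mat['image'] >> rgb2gray['input'],
             rgb2gray['out'] >> circle_detector['input'],
             sync['info'] >> info2cv['camera_info'],
             info2cv['K'] >> poser['K'],
             circle_detector['out', 'ideal', 'found'] >> poser['points', 'ideal', 'found'],
             circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
             img2mat['image'] >> circle_drawer['input']]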
Code Example #18
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer
from ecto_opencv.imgproc import Canny, RGB2GRAY, cvtColor

video_cap = VideoCapture(video_device=0, width=640, height=480)
fps = FPSDrawer()
canny = Canny()
gray = cvtColor(flag=RGB2GRAY)
plasm = ecto.Plasm()
plasm.connect(
    video_cap['image'] >> gray[:],
    gray[:] >> canny['image'],
    canny['image'] >> fps['image'],
    fps['image'] >> imshow(name='Canny Image')['image'],
    video_cap['image'] >> imshow(name='Original Image')['image'],
)

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Capture a video from the device and display it.')
Code Example #19
#!/usr/bin/env python
import ecto
from ecto_opencv import imgproc, highgui, features2d
import time
#import orb as imgproc

debug = False
#debug = True

plasm = ecto.Plasm()

video = highgui.VideoCapture(video_device=0)
orb_m = features2d.ORB(n_features=2500)
draw_kpts = features2d.DrawKeypoints()
imshow = highgui.imshow(name="ORB", waitKey=2, autoSize=True)
rgb2gray = imgproc.cvtColor(flag=7)

plasm.connect(video, "image", rgb2gray, "input")
plasm.connect(rgb2gray, "out", orb_m, "image")
plasm.connect(orb_m, "kpts", draw_kpts, "kpts")
plasm.connect(video, "image", draw_kpts, "input")
plasm.connect(draw_kpts, "output", imshow, "input")

if debug:
    print plasm.viz()
    ecto.view_plasm(plasm)

prev = time.time()
count = 0

while (imshow.outputs.out != 27):
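    # NOTE: the listing is cut off here. Given the prev/count bookkeeping
    # above and the stepping loop of Code Example #1, the body plausibly
    # stepped the plasm and printed a frame rate (an assumption):
    plasm.execute(1)
    count += 1
    now = time.time()
    if now - prev >= 1.0:
        print count, "fps"
        count = 0
        prev = now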
Code Example #20
File: canny.py Project: ethanrublee/ecto_opencv
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer
from ecto_opencv.imgproc import Canny, RGB2GRAY, cvtColor

video_cap = VideoCapture(video_device=0, width=640, height=480)
fps = FPSDrawer()
canny = Canny()
gray = cvtColor(flag=RGB2GRAY)
plasm = ecto.Plasm()
plasm.connect(video_cap['image'] >> gray[:],
              gray[:] >> canny['image'],
              canny['image'] >> fps['image'],
              fps['image'] >> imshow(name='Canny Image')['image'],
              video_cap['image'] >> imshow(name='Original Image')['image'],
              )

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Capture a video from the device and display it.')
Code Example #21
ImageBagger = ecto_sensor_msgs.Bagger_Image
CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo
baggers = dict(
    image=ImageBagger(topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
    image_ci=CameraInfoBagger(topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
)

source = ecto_ros.BagReader(
    'Bag Reader',
    baggers=baggers,
    bag=bag,
    random_access=False,
)

image = ecto_ros.Image2Mat()
rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
display = imshow(name='RGB', triggers=dict(save=ord('s')))

detector = ecto_yolo.Detector(
    data_config="/home/sam/OrkData/cfg/ork.dataset",
    network_config="/home/sam/OrkData/cfg/yolov3-ork.cfg",
    weights="/home/sam/OrkData/weights/yolov3-ork_34000.weights",
    detection_threshold=0.2,
)

# ecto options
scheduler_options(parser)
args = parser.parse_args()

plasm = ecto.Plasm()
plasm.connect(
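    # NOTE: the listing is cut off inside this call. A plausible completion,
    # modeled on the same cells' wiring in Code Example #10 (an assumption;
    # the detector's input keys are not shown here, so it is left unconnected):
    source['image'] >> image['image'],
    image[:] >> rgb[:],
    rgb[:] >> display[:],
)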
Code Example #22
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                                            orb_template='', res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.
    
    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.  
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K_depth', 'K_image', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    graph += [ # find the normals
                source['K_depth', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K_depth', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K_image', 'mask_depth', 'points3d'] >> poser['color_image', 'K_image', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
        graph += [ source['image', 'K_image'] >> poser['color_image', 'K_image'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K_depth'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K_image'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   mask=ImageBagger(topic_name='/camera/mask'),
                   pose=PoseBagger(topic_name='/camera/pose'),
                   image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                   depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                   )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
Code Example #23
def do_projector():
    options = parse_options()

    # define the input
    subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0),
                depth=ImageSub(topic_name='camera/depth/image', queue_size=0),
                depth_info=CameraInfoSub(topic_name='camera/depth/camera_info', queue_size=0),
                image_info=CameraInfoSub(topic_name='camera/rgb/camera_info', queue_size=0),
             )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    invert = imgproc.BitwiseNot()
    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)

    camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

    pattern_draw = projector.PatternProjector()
    circle_detector = calib.PatternDetector('Dot Detector',
                                                rows=5, cols=3,
                                                pattern_type="acircles",
                                                square_size=0.04)
    circle_drawer = calib.PatternDrawer('Circle Draw',
                                                     rows=5, cols=3)

    calibrator = projector.Calibrator()

    s1 = ecto.Strand()
    main_display = highgui.imshow("rgb show", name="rgb", waitKey=5, strand=s1)
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> main_display[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1, strand=s1)[:],
                im2mat_rgb["image"] >> invert[:],
                invert[:] >> circle_detector["input"],
                im2mat_rgb["image"] >> bgr2rgb["input"],
                bgr2rgb["out"] >> circle_drawer['input'],
                circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
                circle_drawer["out"] >> highgui.imshow("pattern show", name="pattern", waitKey= -1, strand=s1)[:],
#                sync["image","depth"] >> pattern_detection['image', 'depth'],
#                pattern_detection['points'] >> projection_estimator['points']
            ]

    print "Press 's' to capture a view."

    # add the display of the pattern
    video_display = highgui.imshow('pattern',
                               name='video_cap', waitKey=2, maximize=False, autoSize=True)
    graph += [pattern_draw['pattern'] >> video_display['input'],
              pattern_draw['points'] >> calibrator['pattern'],
              circle_detector['out', 'found'] >> calibrator['points', 'found'],
              camera_info['K'] >> calibrator['K'],
              main_display['out'] >> calibrator['trigger'],
              im2mat_depth["image"] >> calibrator['depth'] ]


    plasm = ecto.Plasm()
    plasm.connect(graph)

    # display DEBUG data if needed
    if DEBUG:
        print plasm.viz()
        ecto.view_plasm(plasm)

    # execute the pipeline
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
Code Example #24
    debug = True
    poser = OpposingDotPoseEstimator(plasm,
                                        rows=5, cols=3, 
                                        pattern_type="acircles",
                                        square_size=0.04, debug=debug)
    camera_info = calib.CameraIntrinsics('Camera Info',
                                                  camera_file="camera.yml")
    subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0),
                depth=ImageSub(topic_name='camera/depth/image', queue_size=0),
                )
    
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs
                                 )

    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
    gray2rgb = imgproc.cvtColor('gray -> rgb', flag=8)
    print gray2rgb.__doc__
    

    display = highgui.imshow('Poses', name='Poses', waitKey=2, autoSize=True)
    mask_display = highgui.imshow('Masks', name='Masks', waitKey=-1, autoSize=True)
    object_display = highgui.imshow('Object', name='Object', waitKey=-1, autoSize=True)

    imsaver = highgui.ImageSaver('pose image saver', filename='image_pose_')
    rawsaver = highgui.ImageSaver('raw image saver', filename='image_raw_')
    masker = calib.PlanarSegmentation(z_min=0.015, y_crop=0.15, x_crop=0.15)
    bitwise_and = imgproc.BitwiseAnd()
    
    im2mat_rgb = ecto_ros.Image2Mat()
Code Example #25
File: plane.py Project: ArthurVal/riddle_ork
    scheduler_options(parser.add_argument_group("Scheduler"))
    options = parser.parse_args()

    return options


if __name__ == "__main__":
    options = parse_args()

    plasm = ecto.Plasm()

    # setup the input source, grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15

    source = create_source("image_pipeline", "OpenNISource", image_mode=SXGA_RES, image_fps=FPS_15)
    rgb2gray = cvtColor("Grayscale", flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(l=600, nu=100)
    depth_to_3d = DepthTo3d()

    plasm.connect(source["image"] >> rgb2gray["image"])

    # connect up the pose_est
    connections = [
        source["image"] >> plane_finder["image"],
        source["depth_raw"] >> depth_to_3d["depth"],
        source["K"] >> depth_to_3d["K"],
        depth_to_3d["points3d"] >> plane_finder["points3d"],
        source["K"] >> plane_finder["K"],
    ]
    connections += [plane_finder["image"] >> imshow(name="hulls")[:], source["image"] >> imshow(name="original")[:]]
    plasm.connect(connections)
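The options produced by scheduler_options are typically handed to ecto.opts.run_plasm to execute the graph; a short sketch of that final step (an assumption, mirroring the ecto.opts usage in Code Example #18):

    from ecto.opts import run_plasm

    # Run with whatever scheduler the command-line options selected.
    run_plasm(options, plasm)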
Code Example #26
def do_projector():
    options = parse_options()

    # define the input
    subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=1),
                depth=ImageSub(topic_name='camera/depth/image', queue_size=1),
                #depth_info=CameraInfoSub(topic_name='camera/depth/camera_info', queue_size=0),
                #image_info=CameraInfoSub(topic_name='camera/rgb/camera_info', queue_size=0),
             )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    invert = imgproc.BitwiseNot()
    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)
    bgr2gray = imgproc.cvtColor('bgr -> gray', flag=7)

    camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

    calibrator = projector.Calibrator()
    plasm = ecto.Plasm()
    offset_x = 0

    s1 = ecto.Strand()
    main_display = highgui.imshow("rgb show", name="rgb", waitKey=5, strand=s1)
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> main_display[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1, strand=s1)[:],
                im2mat_rgb["image"] >> (bgr2rgb["input"], bgr2gray["input"]),
#                sync["image","depth"] >> pattern_detection['image', 'depth'],
#                pattern_detection['points'] >> projection_estimator['points']
            ]

    print "Press 's' to capture a view."

    # add the display of the pattern
    video_display = highgui.imshow('pattern',
                               name='video_cap', waitKey=2, maximize=False, autoSize=True)
    case = 2
    if case == 0:
        offset_x = -.25
        pose_from_fiducial = PoseFromFiducial(plasm,
                                        rows=5, cols=3,
                                        pattern_type="acircles",
                                        square_size=0.04,
                                        offset_x=offset_x,
                                        debug=DEBUG)
        # deal with fiducial markers
        graph += [ bgr2rgb["out"] >> pose_from_fiducial['color_image'],
                bgr2gray["out"] >> pose_from_fiducial['image'],
                camera_info['K'] >> pose_from_fiducial['K'],
                pose_from_fiducial['debug_image'] >> highgui.imshow("pattern show", name="pattern", waitKey= -1, strand=s1)[:],
                ]
        # Deal with the warping
        warper = projector.FiducialWarper(projection_file='projector_calibration.yml',
                                          radius=0.10)
        graph += [pose_from_fiducial['R', 'T', 'found'] >> warper['R', 'T', 'found'],
              warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
              ]
    elif case == 1:
        warper = projector.DepthWarper(projection_file='projector_calibration.yml')
        graph += [camera_info['K'] >> warper['K'],
                  im2mat_depth["image"] >> warper['depth'],
                  warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
                  ]
    elif case == 2:
        # Deal with the warping
        warper = projector.ImageWarper(projection_file='projector_calibration.yml', offset_x=0, offset_y=0)
        pose_from_plane = projector.PlaneFitter()
        pose_draw = calib.PoseDrawer('Plane Pose Draw')
        graph += [im2mat_depth["image"] >> pose_from_plane['depth'],
                  camera_info['K'] >> pose_from_plane['K'],
                  pose_from_plane['R', 'T'] >> warper['R', 'T'],
                  im2mat_rgb["image"] >> (pose_draw['image'],),
                  highgui.imread(image_file='lena.jpg')["image"] >> (warper['image'],),
                  camera_info['K'] >> pose_draw['K'],
                  pose_from_plane['R', 'T'] >> pose_draw['R', 'T'],
                  pose_draw['output'] >> highgui.imshow("pose", name="pose", waitKey= -1, strand=s1)[:],
                  warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
                  ]

    plasm.connect(graph)

    # display DEBUG data if needed
    #if DEBUG:
    #    print plasm.viz()
    #    ecto.view_plasm(plasm)

    # execute the pipeline
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
Code Example #27
File: webcam_pose.py Project: ethanrublee/ecto_talk
#!/usr/bin/env python
# add pose estimation
import ecto
from ecto_opencv import highgui, imgproc, calib

plasm = ecto.Plasm()
video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.RGB2GRAY)

detect = calib.PatternDetector(rows=5, cols=3,
                               pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                               square_size=0.04)

draw = calib.PatternDrawer(rows=5, cols=3)

pose_calc = calib.FiducialPoseFinder('Pose Calc')
pose_draw = calib.PoseDrawer('Pose Draw')
camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

imshow = highgui.imshow(name='Pattern', waitKey=2)

plasm.connect(
    video_cap['image'] >> rgb2gray['input'],
    rgb2gray['out'] >> detect['input'],
    detect['out', 'found'] >> draw['points', 'found'],
    video_cap['image'] >> draw['input'],
    camera_info['K'] >> (pose_calc['K'], pose_draw['K']),
    detect['out', 'ideal', 'found'] >> pose_calc['points', 'ideal', 'found'],
    pose_calc['R', 'T'] >> pose_draw['R', 'T'],
Code Example #28
File: colormod.py Project: garybradski/ecto_opencv
    template_size=cv.Size(45, 120),
    sample_skipx=5,
    sample_skipy=5,
    match_thresh=0.95,
    search_skipx=5,
    search_skipy=5,
    hist_type=1,
    overlap_thresh=0.5,
)
IdGen = line_mod.IdGenerator(object_id="Odwalla")
DrawDetections = line_mod.DrawDetections()

# this will read all images in the given path
images = highgui.ImageReader("image reader", path=os.path.expanduser("/home/bradski/code/data/set01/images"))
masks = highgui.ImageReader("mask reader", path=os.path.expanduser("/home/bradski/code/data/set01/masks"))
rgb2gray = imgproc.cvtColor("rgb -> gray", flag=7)

# this is similar to a slide show... Wait forever for a key
image_display = highgui.imshow("image display", name="image", waitKey=-1, autoSize=True)
mask_display = highgui.imshow("mask display", name="mask", waitKey=-1, autoSize=True)
colormod_display = highgui.imshow("mod display", name="mod", waitKey=-1, autoSize=True)
detections_display = highgui.imshow("detections", name="detect", waitKey=0, autoSize=True)

plasm.connect(
    images["out"] >> (image_display["input"], DrawDetections["raw"], bin_color["image"]),
    bin_color[:] >> (db_color[:], testimage["colorord"]),
    db_color[:] >> colormod_display[:],
    testimage["detections"] >> DrawDetections["detect"],
    DrawDetections["out"] >> detections_display["input"],
)
Code Example #29
File: webcam_canny.py Project: straszheim/ecto_talk
#!/usr/bin/env python

import ecto
from ecto_opencv import highgui, imgproc

plasm = ecto.Plasm()
video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()
rgb2gray = imgproc.cvtColor("rgb -> gray", flag=imgproc.RGB2GRAY)

blur = imgproc.GaussianBlur(kernel=0, sigma=1)

canny = imgproc.Canny()

imshow = highgui.imshow(name="Pattern", waitKey=2)

plasm.connect(
    video_cap["image"] >> rgb2gray["input"],
    rgb2gray["out"] >> blur["input"],
    blur["out"] >> canny["input"],
    canny["out"] >> fps["image"],
    fps["image"] >> imshow["input"],
)

if __name__ == "__main__":
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute_async()

    from IPython.Shell import IPShellEmbed
Code Example #30
    parser = argparse.ArgumentParser(description='Find a plane in an RGBD image.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    # set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15
    source = create_source('image_pipeline', 'OpenNISource', image_mode=SXGA_RES, image_fps=FPS_15)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(l=600, nu=100)
    depth_to_3d = DepthTo3d()

    plasm.connect(source['image'] >> rgb2gray['image'])

    # connect up the plane finder and depth-to-3d cells
    connections = [ source['image'] >> plane_finder['image'],
                    source['depth_raw'] >> depth_to_3d['depth'],
                    source['K'] >> depth_to_3d['K'],
                    depth_to_3d['points3d'] >> plane_finder['points3d'],
                    source['K'] >> plane_finder['K'] ]
    connections += [plane_finder['image'] >> imshow(name='hulls')[:],
                    source['image'] >> imshow(name='original')[:]]
    plasm.connect(connections)
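    # The listing stops after building the graph; a minimal hedged run using
    # run_plasm (assumed imported above the cut, as in example #44), which
    # honors the scheduler options parsed earlier:
    run_plasm(options, plasm, locals=vars())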
Code example #31
File: colormod.py Project: Jigarsenjalia/ecto_opencv
                                    match_thresh=0.95,
                                    search_skipx=5,
                                    search_skipy=5,
                                    hist_type=1,
                                    overlap_thresh=.5)
IdGen = line_mod.IdGenerator(object_id="Odwalla")
DrawDetections = line_mod.DrawDetections()

#read all images (and masks) from the data set directories
images = highgui.ImageReader(
    "image reader",
    path=os.path.expanduser("/home/bradski/code/data/set01/images"))
masks = highgui.ImageReader(
    "mask reader",
    path=os.path.expanduser("/home/bradski/code/data/set01/masks"))
rgb2gray = imgproc.cvtColor("rgb -> gray", flag=7)

#this is similar to a slide show... Wait forever for a key
image_display = highgui.imshow("image display",
                               name="image",
                               waitKey=-1,
                               autoSize=True)
mask_display = highgui.imshow("mask display",
                              name="mask",
                              waitKey=-1,
                              autoSize=True)
colormod_display = highgui.imshow("mod display",
                                  name="mod",
                                  waitKey=-1,
                                  autoSize=True)
detections_display = highgui.imshow("detections",
Code example #32
    poser = OpposingDotPoseEstimator(plasm,
                                     rows=5,
                                     cols=3,
                                     pattern_type="acircles",
                                     square_size=0.04,
                                     debug=debug)
    camera_info = calib.CameraIntrinsics('Camera Info',
                                         camera_file="camera.yml")
    subs = dict(
        image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0),
        depth=ImageSub(topic_name='camera/depth/image', queue_size=0),
    )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
    gray2rgb = imgproc.cvtColor('gray -> rgb', flag=8)
    print gray2rgb.__doc__

    display = highgui.imshow('Poses', name='Poses', waitKey=2, autoSize=True)
    mask_display = highgui.imshow('Masks',
                                  name='Masks',
                                  waitKey=-1,
                                  autoSize=True)
    object_display = highgui.imshow('Object',
                                    name='Object',
                                    waitKey=-1,
                                    autoSize=True)

    imsaver = highgui.ImageSaver('pose image saver', filename='image_pose_')
Code example #33
def create_capture_plasm(bag_name, angle_thresh):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''

    plasm = ecto.Plasm()

    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                   depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                   )

    bagwriter = ecto.If('Bag Writer if R|T',
                        cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                        )

    subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
                image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
                depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
                )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
    keys = subs.keys()

    graph = [
                sync[:] >> bagwriter[keys],
            ]

    im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
    camera_info = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
    poser = OpposingDotPoseEstimator(plasm,
                                     rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)

    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.RGB2BGR)
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    delta_pose = capture.DeltaRT("delta R|T", angle_thresh=angle_thresh)
    display = highgui.imshow('Poses', name='Poses', waitKey=5, autoSize=True, triggers=dict(save=ord('s')))
    saver = ecto.If(cell=highgui.ImageSaver('saver', filename_format='ecto_image_%05d.jpg',
                                            start=1))

    graph += [sync['image'] >> im2mat_rgb[:],
              im2mat_rgb[:] >> (rgb2gray[:], bgr2rgb[:]),
              bgr2rgb[:] >> poser['color_image'],
              rgb2gray[:] >> poser['image'],
              poser['debug_image'] >> (display['input'], saver['image']),
              display['save'] >> saver['__test__'],
              sync['image_ci'] >> camera_info['camera_info'],
              camera_info['K'] >> poser['K'],
              poser['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
              delta_pose['novel'] >> bagwriter['__test__'],
              ]
    plasm.connect(graph)
    return plasm
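
A hedged usage sketch for the factory above; ecto_ros graphs need the ROS node initialized before the plasm runs, and the bag name, node name, and angle threshold here are example values, not from the source:

if __name__ == '__main__':
    import sys
    import ecto_ros
    ecto_ros.init(sys.argv, 'openni_capture')  # assumed node name
    plasm = create_capture_plasm('capture.bag', angle_thresh=0.175)
    ecto.schedulers.Singlethreaded(plasm).execute()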
Code example #34
import ecto
from ecto_opencv.imgproc import cvtColor, Conversion

from ecto_openni import OpenNICapture, DEPTH_RGB
from ecto_opencv.calib import DepthTo3d, DepthMask
from ecto.opts import scheduler_options, run_plasm

from image_pipeline import LatchMat
import ecto_registration
from registration import PoseEstimator3DProj, FeatureFinder, RotateZ

n_feats = 1000
plasm = ecto.Plasm()

#setup the input source, grayscale conversion
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=True, sync=True)
rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)

plasm.connect(capture['image'] >> rgb2gray['image'])

#convenience variable for the grayscale
img_src = rgb2gray['image']

# calculate 3d points
depthTo3d = DepthTo3d()
plasm.connect(capture['depth'] >> depthTo3d['depth'],
              capture['K'] >> depthTo3d['K']
              )

# connect up the test ORB
orb = FeatureFinder('ORB test', n_features=n_feats, n_levels=6, scale_factor=1.2, thresh=100, use_fast=False)
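
# Truncated in the original; a minimal hedged sketch wiring the detector to
# the grayscale stream (the 'image' input name is an assumption) and running
# the graph with the scheduler idiom used elsewhere in these examples:
plasm.connect(img_src >> orb['image'])

if __name__ == '__main__':
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()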
Code example #35
File: doit.py Project: straszheim/rounder
udim = 0.04

ppcm = 20
xcm = 7
ycm = 10
srr = calib.SubrectRectifier("extractor",
                             xoffset=-0.135, yoffset=0,
                             xsize_world=xcm*0.01, ysize_world=ycm*0.01,
                             xsize_pixels=xcm*ppcm, ysize_pixels=ycm*ppcm)

# sub_rgb = ecto_sensor_msgs.Subscriber_Image('Image sub',topic_name='image')
im2mat_rgb = ecto_ros.Image2Mat('Image -> cv::Mat')
im2mat_depth = ecto_ros.Image2Mat('Depth -> cv::Mat')

rgb2bgr = imgproc.cvtColor('rgb -> bgr')
#pose_gen = ecto_ros.RT2PoseStamped('R,T -> PoseStamped',frame_id='/openni_rgb_optical_frame')
#pose_pub = ecto_geometry_msgs.Publisher_PoseStamped('Pose Pub',topic_name='dot_pose')


training_card_conv = ecto_ros.Mat2Image('Training Card Converter')
training_card_pub = ecto_sensor_msgs.Publisher_Image('Training Card Publisher',
                                                     topic_name='/ecto/training_card')

pose_from_plane = projector.PlaneFitter()

dewarp_table = calib.SubrectRectifier("tabledewarper",
                                      xoffset=-0.3, yoffset=-0.3,
                                      xsize_world=0.6, ysize_world=0.6,
                                      xsize_pixels=int(ppcm*60), ysize_pixels=int(ppcm*60))
Code example #36
#!/usr/bin/env python
# add circle detection
import ecto
from ecto_opencv import highgui, calib, imgproc

plasm = ecto.Plasm()
sched = ecto.schedulers.Singlethreaded(plasm)

video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()

video_display = highgui.imshow('imshow',
                               name='video_cap', waitKey=2, maximize=False)

rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)

circle_detector = calib.PatternDetector(rows=7, cols=3,
                                        pattern_type="acircles", square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show', name='Pattern', waitKey=-1, maximize=False)

plasm.connect(video_cap['image'] >> rgb2gray['input'],
              video_cap['image'] >> (circle_drawer['input'],
                                     video_display['input']),
              fps['image'] >> circle_display['input'],
              circle_drawer['out'] >> fps['image'],

              rgb2gray['out'] >> circle_detector['input'],
              circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
              )
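
# The Singlethreaded scheduler was created near the top of this example but
# the listing is cut off before it runs; the matching call from the other
# examples in this collection:
sched.execute()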
Code example #37
#!/usr/bin/env python
import ecto

# import ecto_opencv.cv_bp as opencv
from ecto_opencv import highgui, calib, imgproc

debug = True

plasm = ecto.Plasm()
rows = 5
cols = 3
square_size = 0.03  # in meters, 3 cm

pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
rgb2gray = imgproc.cvtColor(flag=7)
video = highgui.VideoCapture(video_device=0)
circle_detector = calib.PatternDetector(rows=rows, cols=cols, pattern_type="acircles", square_size=square_size)
circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
poser = calib.FiducialPoseFinder()
pose_drawer = calib.PoseDrawer()
camera_intrinsics = calib.CameraIntrinsics(camera_file="camera.yml")
splitter = imgproc.ChannelSplitter()

plasm.connect(
    video["image"] >> (rgb2gray["input"], splitter["input"]),
    splitter["out_0"] >> (circle_detector["input"], circle_drawer["input"]),
)

plasm.connect(
    circle_detector["out"] >> circle_drawer["points"],
    circle_detector["found"] >> circle_drawer["found"],
Code example #38
File: openni_capture.py Project: Telpr2/capture
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture OpenNI data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K', 'K', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    graph += [ # find the normals
                source['K', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K', 'mask_depth', 'points3d'] >> poser['color_image', 'K', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern; there may be a scale ambiguity as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                         pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                         square_size=0.04, debug=True)
        graph += [ source['image', 'K'] >> poser['color_image', 'K'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K'] >> pose_filter['K'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                                              n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
Code example #39
import ecto
from ecto_opencv import highgui, calib, imgproc, cv_bp as cv
from object_recognition.observations import *
from ecto_object_recognition import capture

plasm = ecto.Plasm()

video_cap = highgui.VideoCapture(video_device=0, width=640, height=480)
camera_intrinsics = calib.CameraIntrinsics(camera_file='camera.yml')

poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                 pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                 square_size=0.04, debug=True
                                 )

bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.RGB2BGR)
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.RGB2GRAY)
display = highgui.imshow('Poses', name='Poses')

plasm.connect(video_cap['image'] >> rgb2gray[:],
              rgb2gray[:] >> poser['image'],
              video_cap['image'] >> poser['color_image'],
              poser['debug_image'] >> display['image'],
              camera_intrinsics['K'] >> poser['K']
              )

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Estimate the pose of an opposing dot fiducial.')
Code example #40
subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
            depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
            image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
            depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
            )

sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
camera_info = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
poser = OpposingDotPoseEstimator(plasm,
                                 rows=5, cols=3,
                                 pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                 square_size=0.04, debug=True)

bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.RGB2BGR)
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
display = highgui.imshow('Poses', name='Poses', waitKey=5, autoSize=True)

graph = [sync['image'] >> im2mat_rgb[:],
         im2mat_rgb[:] >> (rgb2gray[:], bgr2rgb[:]),
         bgr2rgb[:] >> poser['color_image'],
         rgb2gray[:] >> poser['image'],
         poser['debug_image'] >> display['input'],
         sync['image_ci'] >> camera_info['camera_info'],
         camera_info['K'] >> poser['K'],
         ]
plasm.connect(graph)

if __name__ == '__main__':
    import sys
Code example #41
File: fsim.py Project: FlavioFalcao/sim
rows = 11
cols = 4
width = 0.279
height = 0.216
offset_x = -(width/2 - 0.06)
offset_y = -(height/2 - 0.020)

print "ox" , offset_x, "oy", offset_y


simulator = PlanarSim(image_name=board, width=width, height=height)

square_size = 0.02 # in meters, 2 cm
pattern_type = ASYMMETRIC_CIRCLES_GRID

pattern_show = imshow('Display', name='pattern')
rgb2gray = cvtColor('RGB -> Gray', flag=Conversion.RGB2GRAY)
circle_detector = PatternDetector(rows=rows, cols=cols,
                                  pattern_type=pattern_type,
                                  square_size=square_size,
                                  offset_x=offset_x,
                                  offset_y=offset_y)

circle_drawer = PatternDrawer(rows=rows, cols=cols)
poser = FiducialPoseFinder()
pose_drawer = PoseDrawer()
gt_drawer = PoseDrawer()
plasm = ecto.Plasm()
plasm.connect(simulator['image'] >> (rgb2gray['image'], circle_drawer['input']),
              rgb2gray['image'] >> circle_detector['input'],
              circle_detector['out', 'found'] >> circle_drawer['points', 'found'],
Code example #42
File: orb_sample.py Project: mkrainin/ecto_opencv
#!/usr/bin/env python
import ecto
from ecto_opencv import imgproc, highgui, features2d

plasm = ecto.Plasm()

video = highgui.VideoCapture(video_device=0)
orb_m = features2d.ORB(n_features=2500)
draw_kpts = features2d.DrawKeypoints()
orb_display = highgui.imshow('orb display', name="ORB", waitKey=5, autoSize=True)
rgb2gray = imgproc.cvtColor(flag=imgproc.Conversion.RGB2GRAY)
fps = highgui.FPSDrawer()

plasm.connect(video["image"] >> rgb2gray["input"],
              rgb2gray["out"] >> orb_m["image"],
              orb_m["kpts"] >> draw_kpts["kpts"],
              video["image"] >> draw_kpts["input"],
              draw_kpts["output"] >> fps[:],
              fps[:] >> orb_display["input"],
              )

if __name__ == '__main__':
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
Code example #43
File: normal.py Project: plasmodic/ecto_opencv
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Find a plane in an RGBD image.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    # set up the input source and grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15, VGA_RES, FPS_30
    source = create_source('image_pipeline', 'OpenNISource', image_mode=VGA_RES, image_fps=FPS_30)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    depth_to_3d = DepthTo3d()
    compute_normals = {}
    draw_normals = {}
    # list all the estimator types; the second assignment restricts this run to FALS
    normal_types = [RgbdNormalsTypes.LINEMOD, RgbdNormalsTypes.FALS, RgbdNormalsTypes.SRI]
    normal_types = [RgbdNormalsTypes.FALS]
    for type in normal_types:
        compute_normals[type] = ComputeNormals(method=type)
        draw_normals[type] = DrawNormals(step=20)

    #plasm.connect(source['image'] >> rgb2gray['image'])

    # connect up the depth-to-3d conversion and the original-image display
    connections = [ source['depth'] >> depth_to_3d['depth'],
                    source['K_depth'] >> depth_to_3d['K'],
                    source['image'] >> imshow(name='original', waitKey=1)[:]
Code example #44
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Test orb on images.')
    parser.add_argument('-i', '--input',
                        dest='input',
                        help='The input dir. %(default)s',
                        default='./images')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()
    return options


options = parse_args()

images = ImageReader(path=options.input)
orb_m = ORB(n_features=5000, n_levels=10)
rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)
_stats = ORBstats()
stats = ecto.If(cell=_stats)
stats.inputs.__test__ = False
accum = DescriptorAccumulator()
plasm = ecto.Plasm()
plasm.connect(
    images['image'] >> rgb2gray['image'],
    rgb2gray['image'] >> orb_m['image'],
    orb_m['descriptors'] >> accum[:],
    accum[:] >> stats['descriptors'],
)

run_plasm(options, plasm, locals=vars())
_stats.process()
hist = _stats.outputs.distances.toarray()
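
# Truncated in the original; presumably the distance histogram is examined
# next. A minimal hedged sketch (matplotlib is an assumption, not in the source):
import matplotlib.pyplot as plt
plt.plot(hist)
plt.xlabel('pairwise ORB descriptor distance')
plt.ylabel('count')
plt.show()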
Code example #45
rows = 11
cols = 4
width = 0.279
height = 0.216
offset_x = -(width / 2 - 0.06)
offset_y = -(height / 2 - 0.020)

print "ox", offset_x, "oy", offset_y

simulator = PlanarSim(image_name=board, width=width, height=height)

square_size = 0.02  # in meters, 2 cm
pattern_type = ASYMMETRIC_CIRCLES_GRID

pattern_show = imshow('Display', name='pattern')
rgb2gray = cvtColor('RGB -> Gray', flag=Conversion.RGB2GRAY)
circle_detector = PatternDetector(rows=rows,
                                  cols=cols,
                                  pattern_type=pattern_type,
                                  square_size=square_size,
                                  offset_x=offset_x,
                                  offset_y=offset_y)

circle_drawer = PatternDrawer(rows=rows, cols=cols)
poser = FiducialPoseFinder()
pose_drawer = PoseDrawer()
gt_drawer = PoseDrawer()
plasm = ecto.Plasm()
plasm.connect(
    simulator['image'] >> (rgb2gray['image'], circle_drawer['input']),
    rgb2gray['image'] >> circle_detector['input'],