Example #1
def do_ecto():
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   )

    bagreader = ecto_ros.BagReader('Bag Ripper',
                                    baggers=baggers,
                                    bag=sys.argv[1],
                                  )
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
                bagreader["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
                bagreader["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
            ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.Scheduler(plasm)
    sched.execute()
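These do_ecto snippets are shown without their import preamble. A sketch of what they assume, with the cell aliases taken from other examples on this page (ecto_sensor_msgs.Subscriber_Image and Bagger_Image appear in Example #27, ecto_ros.init in Example #16); the node name here is hypothetical:

import sys
import ecto
import ecto_ros
import ecto_sensor_msgs
from ecto_opencv import highgui

# cell aliases used throughout these examples
ImageSub = ecto_sensor_msgs.Subscriber_Image
ImageBagger = ecto_sensor_msgs.Bagger_Image

if __name__ == '__main__':
    # register this process as a ROS node before running the plasm
    ecto_ros.init(sys.argv, "image_display", False)
    do_ecto()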
Example #2
def do_ecto():
    #Set up the ecto_ros subscribers, with a dict of output name -> subscriber cell
    subs = dict(image=ImageSub(topic_name='image', queue_size=0),
                depth=ImageSub(topic_name='depth', queue_size=0),
                )
    #The synchronizer expects this dict, and will have an output for each key:value pair
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    #some rosmsg ~> opencv type converters
    rgb = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()
    
    #declare a graph that just shows the images using opencv highgui
    graph = [
                sync["image"] >> rgb["image"],
                sync["depth"] >> depth["image"],
                rgb["image"] >> imshow(name="rgb")["image"],
                depth["image"] >> imshow(name="depth")["image"]
            ]
    
    #add the graph to our ecto plasm
    plasm = ecto.Plasm()
    plasm.connect(graph)
    
    #We'll use a single-threaded scheduler to execute this one.
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute() #execute forever
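The keys of the subs dict become the Synchronizer's output names, so extra streams are synchronized just by growing the dict. A sketch, assuming CameraInfoSub is ecto_sensor_msgs.Subscriber_CameraInfo as in Examples #7 and #16:

CameraInfoSub = ecto_sensor_msgs.Subscriber_CameraInfo
subs = dict(image=ImageSub(topic_name='image', queue_size=0),
            depth=ImageSub(topic_name='depth', queue_size=0),
            image_info=CameraInfoSub(topic_name='camera_info', queue_size=0),
            )
sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
# sync now exposes three synchronized outputs: 'image', 'depth', 'image_info'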
Example #3
def do_ecto():

    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_mono'),
                   depth=ImageBagger(topic_name='/camera/depth/image_raw'),
                   )
    
    bagwriter = ecto_ros.BagWriter('Bag Writer', baggers=baggers,
                                            bag=sys.argv[1],
                                            )
    subs = dict(image=ImageSub(topic_name='/camera/rgb/image_mono', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth/image_raw', queue_size=0),
                )
    
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
                sync[:] >> bagwriter[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1)[:]
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter=30)  # capture a second.
Example #4
def do_ecto():
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   )
    
    bagreader = ecto_ros.BagReader('Bag Ripper',
                                    baggers=baggers,
                                    bag=sys.argv[1],
                                  )
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    
    graph = [
                bagreader["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
                bagreader["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1)[:]
            ]
    
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
Example #5
def do_ecto():
    #Set up the ecto_ros subscribers, with a dict of output name -> subscriber cell
    subs = dict(
        image=ImageSub(topic_name='image', queue_size=0),
        depth=ImageSub(topic_name='depth', queue_size=0),
    )
    #The synchronizer expects this dict, and will have an output for each key:value pair
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    #some rosmsg ~> opencv type converters
    rgb = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()

    #declare a graph that just shows the images using opencv highgui
    graph = [
        sync["image"] >> rgb["image"], sync["depth"] >> depth["image"],
        rgb["image"] >> imshow(name="rgb")["image"],
        depth["image"] >> imshow(name="depth")["image"]
    ]

    #add the graph to our ecto plasm
    plasm = ecto.Plasm()
    plasm.connect(graph)

    #We'll use a single-threaded scheduler to execute this one.
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()  #execute forever
Example #6
def do_ecto():
    baggers = dict(
        image=ImageBagger(topic_name='/camera/rgb/image_mono'),
        depth=ImageBagger(topic_name='/camera/depth/image_raw'),
    )

    bagwriter = ecto_ros.BagWriter(
        'Bag Writer',
        baggers=baggers,
        bag=sys.argv[1],
    )
    subs = dict(
        image=ImageSub(topic_name='/camera/rgb/image_mono', queue_size=0),
        depth=ImageSub(topic_name='/camera/depth/image_raw', queue_size=0),
    )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
        sync["image"] >> im2mat_rgb["image"],
        im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        sync[:] >> bagwriter[:], sync["depth"] >> im2mat_depth["image"],
        im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter=30)  #capture a second.
Example #7
def do_ecto():

    subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth/image', queue_size=0),
                depth_info=CameraInfoSub(topic_name='/camera/depth/camera_info', queue_size=0),
                image_info=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
                )
    
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    drift_printer = ecto_ros.DriftPrinter()
    
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
                sync[:] >> drift_printer[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1)[:]
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()
Example #8
    def connections(self):
        graph = []
        # connect the input
        graph += [
            self.source['image'] >> self.feature_descriptor['image'],
            self.source['image'] >> self.rescale_depth['image'],
            self.source['mask'] >> self.feature_descriptor['mask'],
            self.source['depth'] >> self.rescale_depth['depth'],
            self.source['K'] >> self.depth_to_3d_sparse['K']
        ]

        # Make sure the keypoints are in the mask and with a valid depth
        graph += [
            self.feature_descriptor['keypoints', 'descriptors'] >>
            self.keypoint_validator['keypoints', 'descriptors'],
            self.source['K'] >> self.keypoint_validator['K'],
            self.source['mask'] >> self.keypoint_validator['mask'],
            self.rescale_depth['depth'] >> self.keypoint_validator['depth']
        ]

        # transform the keypoints/depth into 3d points
        graph += [
            self.keypoint_validator['points'] >>
            self.depth_to_3d_sparse['points'],
            self.rescale_depth['depth'] >> self.depth_to_3d_sparse['depth'],
            self.source['R'] >> self.camera_to_world['R'],
            self.source['T'] >> self.camera_to_world['T'],
            self.depth_to_3d_sparse['points3d'] >>
            self.camera_to_world['points']
        ]

        # store all the info
        graph += [
            self.camera_to_world['points'] >> self.model_stacker['points3d'],
            self.keypoint_validator['points'] >> self.model_stacker['points'],
            self.keypoint_validator['descriptors'] >>
            self.model_stacker['descriptors'],
            self.source['K', 'R', 'T'] >> self.model_stacker['K', 'R', 'T'],
        ]

        if self.visualize:
            mask_view = highgui.imshow(name="mask")
            depth_view = highgui.imshow(name="depth")

            graph += [
                self.source['mask'] >> mask_view['image'],
                self.source['depth'] >> depth_view['image']
            ]
            # draw the keypoints
            keypoints_view = highgui.imshow(name="Keypoints")
            draw_keypoints = features2d.DrawKeypoints()
            graph += [
                self.source['image'] >> draw_keypoints['image'],
                self.feature_descriptor['keypoints'] >>
                draw_keypoints['keypoints'],
                draw_keypoints['image'] >> keypoints_view['image']
            ]

        return graph
Example #9
def mesh_session(session, args):
    db_reader = capture.ObservationReader('db_reader', session_id=session.id)
    depthTo3d = calib.DepthTo3d()
    surfel_reconstruction = reconstruction.SurfelReconstruction(corrDistForUpdate=0.02,
                                                                maxInterpolationDist=0.04,
                                                                starvationConfidence=2,
                                                                timeDiffForRemoval=25,
                                                                maxNormalAngle=90 * math.pi / 180)
    if True:
        plasm = ecto.Plasm()
        plasm.connect(
                      db_reader['K', 'depth'] >> depthTo3d['K', 'depth'],
                      depthTo3d['points3d'] >> surfel_reconstruction['points3d'],
                      db_reader['K', 'R', 'T', 'image', 'mask'] >> surfel_reconstruction['K', 'R', 'T', 'image', 'mask'],
                      )
        if args.visualize:
            plasm.connect(
                      db_reader['image'] >> highgui.imshow('image', name='image', waitKey=10, autoSize=True)[:],
                      db_reader['depth'] >> highgui.imshow('depth', name='depth', waitKey= -1, autoSize=True)[:],
                      db_reader['mask'] >> highgui.imshow('mask', name='mask', waitKey= -1, autoSize=True)[:],
                      )
        sched = ecto.schedulers.Singlethreaded(plasm)
        sched.execute()

    if True:
        location = '/tmp/object_recognition/'
        mesh_script_txt = '''<!DOCTYPE FilterScript>
<FilterScript>
 <filter name="Surface Reconstruction: Poisson">
  <Param type="RichInt" value="8" name="OctDepth"/>
  <Param type="RichInt" value="8" name="SolverDivide"/>
  <Param type="RichFloat" value="2" name="SamplesPerNode"/>
  <Param type="RichFloat" value="1" name="Offset"/>
 </filter>
</FilterScript>     
        '''
        mesh_file_name = os.path.join(location, 'meshing.mlx')
        try:
            os.makedirs(location)
        except OSError:
            pass  # the directory may already exist
        with open(mesh_file_name, 'w') as mesh_script:
            mesh_script.write(mesh_script_txt)
        name_base = str(session.id)
        surfel_ply = os.path.join(location, name_base + '.ply')
        print "Saving to:", surfel_ply
        surfel_saver = reconstruction.SurfelToPly(filename=surfel_ply)
        surfel_saver.inputs.at('model').copy_value(surfel_reconstruction.outputs.at('model'))
        surfel_saver.inputs.at('params').copy_value(surfel_reconstruction.outputs.at('params'))
        surfel_saver.inputs.at('camera_params').copy_value(surfel_reconstruction.outputs.at('camera_params'))

        surfel_saver.configure()
        surfel_saver.process() #manually thunk the cell's process function
Example #10
    def connections(self, p):
        # Rescale the depth image and convert to 3d
        graph = [ self.passthrough['image'] >> self._depth_map['image'],
                  self._depth_map['depth'] >> self._points3d['depth'],
                  self.passthrough['K'] >> self._points3d['K'],
                  self._points3d['points3d'] >> self.guess_generator['points3d'] ]
        # make sure the inputs reach the right cells
        if 'depth' in self.feature_descriptor.inputs.keys():
            graph += [ self._depth_map['depth'] >> self.feature_descriptor['depth']]

        graph += [ self.passthrough['image'] >> self.feature_descriptor['image'],
                   self.passthrough['image'] >> self.guess_generator['image'] ]

        graph += [ self.descriptor_matcher['spans'] >> self.guess_generator['spans'],
                   self.descriptor_matcher['object_ids'] >> self.guess_generator['object_ids'] ]

        graph += [ self.feature_descriptor['keypoints'] >> self.guess_generator['keypoints'],
                   self.feature_descriptor['descriptors'] >> self.descriptor_matcher['descriptors'],
                   self.descriptor_matcher['matches', 'matches_3d'] >> self.guess_generator['matches', 'matches_3d'] ]

        cvt_color = imgproc.cvtColor(flag=imgproc.RGB2GRAY)

        if p.visualize or ECTO_ROS_FOUND:
            draw_keypoints = features2d.DrawKeypoints()
            graph += [ self.passthrough['image'] >> cvt_color[:],
                       cvt_color[:] >> draw_keypoints['image'],
                       self.feature_descriptor['keypoints'] >> draw_keypoints['keypoints']
                       ]

        if p.visualize:
            # visualize the found keypoints
            image_view = highgui.imshow(name="RGB")
            keypoints_view = highgui.imshow(name="Keypoints")

            graph += [ self.passthrough['image'] >> image_view['image'],
                       draw_keypoints['image'] >> keypoints_view['image']
                       ]

            pose_view = highgui.imshow(name="Pose")
            pose_drawer = calib.PosesDrawer()

            # draw the poses
            graph += [ self.passthrough['image', 'K'] >> pose_drawer['image', 'K'],
                       self.guess_generator['Rs', 'Ts'] >> pose_drawer['Rs', 'Ts'],
                       pose_drawer['output'] >> pose_view['image'] ]

        if ECTO_ROS_FOUND:
            ImagePub = ecto_sensor_msgs.Publisher_Image
            pub_features = ImagePub("Features Pub", topic_name='features')
            graph += [ draw_keypoints['image'] >> self.message_cvt[:],
                       self.message_cvt[:] >> pub_features[:] ]

        return graph
Example #11
    def connections(self):
        graph = []
        # connect the input
        graph += [
            self.source["image"] >> self.feature_descriptor["image"],
            self.source["image"] >> self.rescale_depth["image"],
            self.source["mask"] >> self.feature_descriptor["mask"],
            self.source["depth"] >> self.rescale_depth["depth"],
            self.source["K"] >> self.depth_to_3d_sparse["K"],
        ]

        # Make sure the keypoints are in the mask and with a valid depth
        graph += [
            self.feature_descriptor["keypoints", "descriptors"] >> self.keypoint_validator["keypoints", "descriptors"],
            self.source["K"] >> self.keypoint_validator["K"],
            self.source["mask"] >> self.keypoint_validator["mask"],
            self.rescale_depth["depth"] >> self.keypoint_validator["depth"],
        ]

        # transform the keypoints/depth into 3d points
        graph += [
            self.keypoint_validator["points"] >> self.depth_to_3d_sparse["points"],
            self.rescale_depth["depth"] >> self.depth_to_3d_sparse["depth"],
            self.source["R"] >> self.camera_to_world["R"],
            self.source["T"] >> self.camera_to_world["T"],
            self.depth_to_3d_sparse["points3d"] >> self.camera_to_world["points"],
        ]

        # store all the info
        graph += [
            self.camera_to_world["points"] >> self.model_stacker["points3d"],
            self.keypoint_validator["points"] >> self.model_stacker["points"],
            self.keypoint_validator["descriptors"] >> self.model_stacker["descriptors"],
            self.source["K", "R", "T"] >> self.model_stacker["K", "R", "T"],
        ]

        if self.visualize:
            mask_view = highgui.imshow(name="mask")
            depth_view = highgui.imshow(name="depth")

            graph += [self.source["mask"] >> mask_view["image"], self.source["depth"] >> depth_view["image"]]
            # draw the keypoints
            keypoints_view = highgui.imshow(name="Keypoints")
            draw_keypoints = features2d.DrawKeypoints()
            graph += [
                self.source["image"] >> draw_keypoints["image"],
                self.feature_descriptor["keypoints"] >> draw_keypoints["keypoints"],
                draw_keypoints["image"] >> keypoints_view["image"],
            ]

        return graph
Example #12
def calibration(rows, cols, square_size, pattern_type, n_obs, video):
    plasm = ecto.Plasm()
    pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
    rgb2gray = imgproc.cvtColor(flag=7)
    circle_detector = calib.PatternDetector(rows=rows,
                                            cols=cols,
                                            pattern_type=pattern_type,
                                            square_size=square_size)
    ecto.print_module_doc(circle_detector)
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml",
                                               n_obs=n_obs)

    plasm.connect(video, "image", rgb2gray, "input")
    plasm.connect(rgb2gray, "out", circle_detector, "input")
    plasm.connect(video, "image", circle_drawer, "input")
    plasm.connect(video, "image", camera_calibrator, "image")
    plasm.connect(circle_detector, "out", circle_drawer, "points")
    plasm.connect(circle_detector, "found", circle_drawer, "found")
    plasm.connect(circle_drawer, "out", pattern_show, "input")
    plasm.connect(circle_detector, "ideal", camera_calibrator, "ideal")
    plasm.connect(circle_detector, "out", camera_calibrator, "points")
    plasm.connect(circle_detector, "found", camera_calibrator, "found")

    print plasm.viz()
    ecto.view_plasm(plasm)

    while (pattern_show.outputs.out != 27
           and not camera_calibrator.outputs.calibrated):
        plasm.execute(1)
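A hypothetical invocation of calibration() (the 7x3 "acircles" grid with 0.03 m squares is borrowed from Examples #24 and #28; n_obs=50 is an arbitrary choice):

video = highgui.VideoCapture(video_device=0)
calibration(rows=7, cols=3, square_size=0.03, pattern_type="acircles",
            n_obs=50, video=video)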
Example #13
 def __init__(self,
              plasm,
              rows,
              cols,
              pattern_type,
              square_size,
              debug=True):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = ecto.Passthrough('Image Input')
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                                  rows=rows,
                                                  cols=cols,
                                                  pattern_type=pattern_type,
                                                  square_size=square_size)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.camera_info = calib.CameraIntrinsics('Camera Info',
                                               camera_file="camera.yml")
     self.debug = debug
     if self.debug:
         self.fps = highgui.FPSDrawer()
         self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                                  rows=rows,
                                                  cols=cols)
         self.circle_display = highgui.imshow('Pattern show',
                                              name='Pattern',
                                              waitKey=2,
                                              autoSize=True)
         self.pose_draw = calib.PoseDrawer('Pose Draw')
Example #14
def calibration(rows, cols, square_size, pattern_type, n_obs, video):
    plasm = ecto.Plasm()
    pattern_show = highgui.imshow(name="pattern", waitKey=10, autoSize=True)
    rgb2gray = imgproc.cvtColor(flag=7)
    circle_detector = calib.PatternDetector(rows=rows, cols=cols,
                                            pattern_type=pattern_type,
                                            square_size=square_size)
    ecto.print_module_doc(circle_detector)
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    camera_calibrator = calib.CameraCalibrator(output_file_name="camera.yml",
                                               n_obs=n_obs)
    
    plasm.connect(video, "image", rgb2gray, "input")
    plasm.connect(rgb2gray, "out", circle_detector, "input")
    plasm.connect(video, "image", circle_drawer, "input")
    plasm.connect(video, "image", camera_calibrator, "image")
    plasm.connect(circle_detector, "out", circle_drawer, "points")
    plasm.connect(circle_detector, "found", circle_drawer, "found")
    plasm.connect(circle_drawer, "out", pattern_show, "input")
    plasm.connect(circle_detector, "ideal", camera_calibrator,"ideal")
    plasm.connect(circle_detector, "out", camera_calibrator,"points")
    plasm.connect(circle_detector, "found", camera_calibrator, "found")
    
    print plasm.viz()
    ecto.view_plasm(plasm)
    
    while (pattern_show.outputs.out != 27
           and not camera_calibrator.outputs.calibrated):
        plasm.execute(1)
Example #15
def make_calib(images, plasm, calibfname):
    rgb2gray = imgproc.cvtColor(flag=imgproc.RGB2GRAY)
    plasm.connect(images["image"] >> rgb2gray[:])

    detect = calib.PatternDetector(rows=5, cols=3,
                                   pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                   square_size=0.04)
    plasm.connect(rgb2gray["out"] >> detect["input"])

    draw = calib.PatternDrawer(rows=5, cols=3)
    plasm.connect(detect["out"] >> draw["points"],
                  detect["found"] >> draw["found"],
                  images["image"] >> draw["input"])

    fps = highgui.FPSDrawer()
    plasm.connect(draw[:] >> fps[:])

    calibcell = calib.CameraCalibrator(output_file_name=calibfname + '.yml',
                                       n_obs=75,
                                       quit_when_calibrated=False)

    plasm.connect(detect["ideal"] >> calibcell["ideal"],
                  detect["out"] >> calibcell["points"],
                  detect["found"] >> calibcell["found"],
                  images["image"] >> calibcell["image"])
    pattern_show = highgui.imshow(name=calibfname, waitKey=10)
    plasm.connect(fps[:] >> pattern_show["input"])
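A minimal driver sketch for make_calib, assuming a webcam as the image source (VideoCapture exposes the 'image' output that make_calib connects):

plasm = ecto.Plasm()
video = highgui.VideoCapture(video_device=0)
make_calib(video, plasm, 'camera')
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()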
Example #16
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []
    
    # Create ros node, subscribing to openni topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
    # Convert ros messages to OpenCV types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
                                                        
    
    # display the rgb image
    display = highgui.imshow(name='Poses')
    graph += [image2cv[:] >> display[:]]
    
    # convert the image to grayscale
#    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
#    graph += [image2cv[:] >> rgb2gray[:]]

    # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [image2cv[:] >> poser['color_image'],
              rgb_info2cv['K'] >> poser['K_image'],
              image2cv[:] >> poser['image']]
    
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
Example #17
    def connections(self, p):
        graph = []
        # connect the input
        if 'depth' in self.feature_descriptor.inputs.keys():
            graph += [self.rescale_depth['depth'] >> self.feature_descriptor['depth']]
        graph += [self.source['image'] >> self.feature_descriptor['image'],
                  self.source['image'] >> self.rescale_depth['image'],
                  self.source['mask'] >> self.feature_descriptor['mask'],
                  self.source['depth'] >> self.rescale_depth['depth'],
                  self.source['K'] >> self.depth_to_3d_sparse['K']]

        # Make sure the keypoints are in the mask and with a valid depth
        graph += [self.feature_descriptor['keypoints', 'descriptors'] >>
                  self.keypoint_validator['keypoints', 'descriptors'],
                  self.source['K'] >> self.keypoint_validator['K'],
                  self.source['mask'] >> self.keypoint_validator['mask'],
                  self.rescale_depth['depth'] >> self.keypoint_validator['depth']]

        # transform the keypoints/depth into 3d points
        graph += [self.keypoint_validator['points'] >> self.depth_to_3d_sparse['points'],
                  self.rescale_depth['depth'] >> self.depth_to_3d_sparse['depth'],
                  self.source['R'] >> self.camera_to_world['R'],
                  self.source['T'] >> self.camera_to_world['T'],
                  self.depth_to_3d_sparse['points3d'] >> self.camera_to_world['points']]

        # store all the info
        graph += [self.camera_to_world['points'] >> self.model_stacker['points3d'],
                  self.keypoint_validator['points', 'descriptors', 'disparities'] >>
                  self.model_stacker['points', 'descriptors', 'disparities'],
                  self.source['K', 'R', 'T'] >> self.model_stacker['K', 'R', 'T'],
                  ]

        if self.visualize:
            mask_view = highgui.imshow(name="mask")
            depth_view = highgui.imshow(name="depth")

            graph += [ self.source['mask'] >> mask_view['image'],
                       self.source['depth'] >> depth_view['image']]
            # draw the keypoints
            keypoints_view = highgui.imshow(name="Keypoints")
            draw_keypoints = features2d.DrawKeypoints()
            graph += [ self.source['image'] >> draw_keypoints['image'],
                       self.feature_descriptor['keypoints'] >> draw_keypoints['keypoints'],
                       draw_keypoints['image'] >> keypoints_view['image']]

        return graph
Example #18
 def connections(self):
     connections = [self._sync["image"] >> self._im2mat_rgb["image"],
                    self._sync["depth"] >> self._im2mat_depth["image"],
                    self._sync["image_info"] >> self._camera_info['camera_info'],
                    self._camera_info['K'] >> self._depth_to_3d['K'],
                    self._im2mat_depth['image'] >> self._depth_to_3d['depth']
                    ]
     if self._debug:
         connections += [self._im2mat_depth[:] >> highgui.imshow(name='kinect depth', waitKey=10)[:]]
     return connections
Example #19
def do_ecto():
    sub_rgb = ImageSub("image_sub",topic_name='/camera/rgb/image_mono')
    sub_depth = ImageSub("depth_sub",topic_name='/camera/depth/image')

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
                sub_rgb["output"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
                sub_depth["output"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()
Example #20
def do_ecto():
    sub_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    sub_depth = ImageSub("depth_sub", topic_name='/camera/depth/image')

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
        sub_rgb["output"] >> im2mat_rgb["image"],
        im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        sub_depth["output"] >> im2mat_depth["image"],
        im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.Scheduler(plasm)
    sched.execute()
Example #21
def connect_observation_calc(sync, commit, object_id, session_id, debug=False):
    plasm = ecto.Plasm()
    depth_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
    image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')

    image = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()
    #conversions
    plasm.connect(
                  sync["image"] >> image["image"],
                  sync["depth"] >> depth['image'],
                  sync['image_ci'] >> image_ci[:],
                  sync['depth_ci'] >> depth_ci[:]
                  )

    rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
    gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)

    calc_observations = CalcObservations(plasm)
    rescale_depth = capture.RescaledRegisteredDepth() #this is for SXGA mode scale handling.
    plasm.connect(image[:] >> (rgb[:], gray[:]),
                  gray[:] >> (calc_observations['image'], rescale_depth['image']),
                  depth[:] >> rescale_depth['depth'],
                  rescale_depth['depth'] >> calc_observations['depth'],
                  image_ci['K'] >> calc_observations['K'],
                  )
    if debug:
        image_display = highgui.imshow('image display', name='image', waitKey=10, autoSize=True)
        mask_display = highgui.imshow('mask display', name='mask', waitKey=-1, autoSize=True)
        plasm.connect(rgb[:] >> image_display[:])
        plasm.connect(calc_observations['mask'] >> mask_display[:])


    if commit:
        db_inserter = capture.ObservationInserter("db_inserter", object_id=object_id, session_id=session_id)
        plasm.connect(depth[:] >> db_inserter['depth'],
                  calc_observations['R', 'T', 'mask', 'novel'] >> db_inserter['R', 'T', 'mask', 'found'],
                  rgb[:] >> db_inserter['image'],
                  image_ci['K'] >> db_inserter['K'],
                  )
    return plasm
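A sketch of building the sync cell this function expects; the dict keys must match the inputs used above ('image', 'depth', 'image_ci', 'depth_ci'), the topic names are borrowed from Example #25, and ImageSub/CameraInfoSub are the subscriber aliases assumed throughout this page:

subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
            depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
            image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
            depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
            )
sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
# object_id/session_id are placeholder values
plasm = connect_observation_calc(sync, commit=False,
                                 object_id='some_object_id', session_id='some_session_id')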
Example #22
    def connections(self, _p):
        train = self.train
        orb = self.orb
        graph = [
            self.gray_image[:] >> orb['image'],
            self.points3d[:] >> orb['points3d'],
            self.depth_mask[:] >> orb['points3d_mask']
        ]

        matcher = Matcher()
        #        if self.use_lsh:
        #           matcher = self.lsh
        graph += [
            orb['descriptors'] >> matcher['test'],
            train['descriptors'] >> matcher['train'],
        ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [
            matcher['matches'] >> pose_estimation['matches'],
            orb['points', 'points3d'] >> pose_estimation['test_2d', 'test_3d'],
            train['points', 'points3d'] >> pose_estimation['train_2d',
                                                           'train_3d']
        ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [
                pose_estimation['matches'] >> match_drawer['matches'],
                pose_estimation['matches_mask'] >>
                match_drawer['matches_mask'],
                orb['points'] >> match_drawer['test'],
                train['points'] >> match_drawer['train'],
                self.rgb_image[:] >> match_drawer['test_image'],
                train['image'] >> match_drawer['train_image'],
                match_drawer['output'] >> imshow(name='Matches')['']
            ]

        tr = self.tr
        fps = self.fps
        pose_draw = PoseDrawer()
        graph += [
            train['R', 'T'] >> tr['R1', 'T1'],
            pose_estimation['R', 'T'] >> tr['R2', 'T2'],
            tr['R', 'T'] >> pose_draw['R', 'T'],
            pose_estimation['found'] >> pose_draw['trigger'],
            self.K_image[:] >> pose_draw['K'],
            self.rgb_image[:] >> pose_draw['image'],
            pose_draw['output'] >> fps[:],
        ]
        return graph
Example #23
    def connections(self):
        # make sure the inputs reach the right cells
        connections = [self.passthrough['image'] >> self.feature_descriptor['image'],
                       self.passthrough['image'] >> self.guess_generator['image'], ]

        connections += [ self.descriptor_matcher['spans'] >> self.guess_generator['spans'],
                       self.descriptor_matcher['object_ids'] >> self.guess_generator['object_ids'] ]

        connections += [ self.feature_descriptor['keypoints'] >> self.guess_generator['keypoints'],
                self.feature_descriptor['descriptors'] >> self.descriptor_matcher['descriptors'],
                self.descriptor_matcher['matches', 'matches_3d'] >> self.guess_generator['matches', 'matches_3d'] ]

        pub_features = ImagePub("Features Pub", topic_name='features')
        cvt_color = imgproc.cvtColor(flag=imgproc.RGB2GRAY)

        draw_keypoints = features2d.DrawKeypoints()
        connections += [ self.passthrough['image'] >> cvt_color[:],
                           cvt_color[:] >> draw_keypoints['image'],
                           self.feature_descriptor['keypoints'] >> draw_keypoints['keypoints'],
                           draw_keypoints['image'] >> self.message_cvt[:],
                           self.message_cvt[:] >> pub_features[:] ]

        if self._visualize:
            # visualize the found keypoints
            image_view = highgui.imshow(name="RGB")
            keypoints_view = highgui.imshow(name="Keypoints")

            connections += [ self.passthrough['image'] >> image_view['image'],
                           draw_keypoints['image'] >> keypoints_view['image']
                           ]

            pose_view = highgui.imshow(name="Pose")
            pose_drawer = calib.PosesDrawer()

            # draw the poses
            connections += [ self.passthrough['image', 'K'] >> pose_drawer['image', 'K'],
                              self.guess_generator['Rs', 'Ts'] >> pose_drawer['Rs', 'Ts'],
                              pose_drawer['output'] >> pose_view['image'] ]

        return connections
Example #24
 def __init__(self, plasm):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = highgui.VideoCapture(video_device=0)
     self.fps = highgui.FPSDrawer()
     self.rgb2gray = imgproc.cvtColor("rgb -> gray", flag=7)
     self.circle_detector = calib.PatternDetector(
         "Dot Detector", rows=7, cols=3, pattern_type="acircles", square_size=0.03
     )
     self.circle_drawer = calib.PatternDrawer("Circle Draw", rows=7, cols=3)
     self.circle_display = highgui.imshow("Pattern show", name="Pattern", waitKey=2, maximize=True)
     self.pose_calc = calib.FiducialPoseFinder("Pose Calc")
     self.pose_draw = calib.PoseDrawer("Pose Draw")
     self.camera_info = calib.CameraIntrinsics("Camera Info", camera_file="camera.yml")
Example #25
def create_capture_plasm(bag_name):
    '''
    Creates a plasm that will capture openni data into a bag.
    '''
    # bag writing
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                   depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                   )
    #conditional writer
    bagwriter = ecto.If('Bag Writer if \'s\'',
                        cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                        )

    subs = dict(image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
                image_ci=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth_registered/image', queue_size=0),
                cloud=PointCloudSub(topic_name='/camera/depth_registered/points', queue_size=0),
                depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
                )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    graph = [
                sync['image', 'depth', 'image_ci', 'depth_ci'] >> bagwriter['image', 'depth', 'image_ci', 'depth_ci'],
            ]

    #point cloud stuff
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
    example = easy_capture.ExampleFilter()
    graph += [
        sync['cloud'] >> msg2cloud[:],
        msg2cloud[:] >> example[:],
    ]

    #use opencv highgui to display an image.
    im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
    display = highgui.imshow('Image Display', name='/camera/rgb/image_color', waitKey=5, autoSize=True, triggers=dict(save=ord('s')))
    bgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.Conversion.RGB2BGR)
    graph += [
        sync['image'] >> im2mat_rgb[:],
        im2mat_rgb[:] >> bgr2rgb[:],
        bgr2rgb[:] >> display[:],
        display['save'] >> bagwriter['__test__']
    ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return plasm
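A driver sketch for the returned plasm: pressing 's' in the image window fires the 'save' trigger, which feeds the '__test__' input gating the conditional bag writer.

plasm = create_capture_plasm('capture.bag')
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()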
Example #26
    def connections(self):
        n_features = 1000
        orb = ORB(n_features=n_features, n_levels=3, scale_factor=1.1)
        graph = [ self.gray_image[:] >> orb['image'],
                  self.points3d[:] >> orb['points3d'],
                ]

        orb_ref = ORB(n_features=n_features, n_levels=3, scale_factor=1.1)
        graph += [ self.gray_image_ref[:] >> orb_ref['image'],
                  self.points3d_ref[:] >> orb_ref['points3d'],
                ]

        matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1, key_size=10, radius=70)
#        matcher = Matcher()
        graph += [ orb['descriptors'] >> matcher['test'],
                   orb_ref['descriptors'] >> matcher['train'],
                  ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [matcher['matches'] >> pose_estimation['matches'],
                  orb['points'] >> pose_estimation['test_2d'],
                  orb_ref['points'] >> pose_estimation['train_2d'],
                  orb['points3d'] >> pose_estimation['test_3d'],
                  orb_ref['points3d'] >> pose_estimation['train_3d'],
                  ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [pose_estimation['matches'] >> match_drawer['matches'],
                      pose_estimation['matches_mask'] >> match_drawer['matches_mask'],
                      orb['points'] >> match_drawer['test'],
                      orb_ref['points'] >> match_drawer['train'],
                      self.rgb_image[:] >> match_drawer['test_image'],
                      orb_ref['image'] >> match_drawer['train_image'],
                      match_drawer['output'] >> imshow(name='Matches')['']
                      ]

        tr = self.tr
        fps = self.fps
        # pose_draw = PoseDrawer()
        # graph += [train['R', 'T'] >> tr['R1', 'T1'],
        #           pose_estimation['R', 'T'] >> tr['R2', 'T2'],
        #           tr['R', 'T'] >> pose_draw['R', 'T'],
        #           pose_estimation['found'] >> pose_draw['trigger'],
        #           self.K[:] >> pose_draw['K'],
        #           self.rgb_image[:] >> pose_draw['image'],
        #           pose_draw['output'] >> fps[:],
        #           ]
        return graph
Example #27
    def connect_background_source(self):
        json_db_str = '{"type": "CouchDB", "root": "http://localhost:5984", "collection": "object_recognition"}'
        self.json_db = ecto.Constant(value=json_db_str)

        object_id_str = '4680aac58c1d263b9449d57bd2000f27'
        self.object_id = ecto.Constant(value=object_id_str)

        self.image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
        self.image = ecto_ros.Image2Mat()
        self.depth = ecto_ros.Image2Mat()

        bag = "/home/sam/rosbags/sigverse/no_objects.bag"

        ImageBagger = ecto_sensor_msgs.Bagger_Image
        CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo

        baggers = dict(
            image=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
            image_ci=CameraInfoBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
            depth=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/depth/image_raw'),
        )

        # this will read all images in the path
        path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
        self.file_source = ImageReader(path=os.path.expanduser(path))

        self.bag_reader = ecto_ros.BagReader('Bag Reader',
                                             baggers=baggers,
                                             bag=bag,
                                             random_access=True,
                                             )

        self.rgb = imgproc.cvtColor(
            'bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
        self.display = highgui.imshow(name='Training Data', waitKey=10000)
        self.image_mux = ecto_yolo.ImageMux()

        self.graph += [
            self.bag_reader['image'] >> self.image['image'],
            self.bag_reader['depth'] >> self.depth['image'],
            self.image[:] >> self.rgb[:],
            self.bag_reader['image_ci'] >> self.image_ci['camera_info'],
            self.image_ci['K'] >> self.image_mux['K1'],
            self.rgb['image'] >> self.image_mux['image1'],
            self.depth['image'] >> self.image_mux['depth1'],
            self.file_source['image'] >> self.image_mux['image2'],
        ]
Example #28
 def __init__(self, plasm):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = highgui.VideoCapture(video_device=0)
     self.fps = highgui.FPSDrawer()
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                             rows=7, cols=3, pattern_type="acircles",
                                             square_size=0.03)
     self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                         rows=7, cols=3)
     self.circle_display = highgui.imshow('Pattern show',
                                     name='Pattern', waitKey=2, maximize=True)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.pose_draw = calib.PoseDrawer('Pose Draw')
     self.camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")
Example #29
    def connections(self):
        n_features = 4000
        train = self.train
        orb = FeatureFinder('ORB test', n_features=n_features, n_levels=3, scale_factor=1.1, thresh=100, use_fast=False)
        graph = [ self.gray_image[:] >> orb['image'],
                  self.points3d[:] >> orb['points3d'],
                  self.depth_mask[:] >> orb['mask']
                ]

        matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1, key_size=10, radius=70)
        #matcher = Matcher()
        graph += [ orb['descriptors'] >> matcher['test'],
                   train['descriptors'] >> matcher['train'],
                  ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [matcher['matches'] >> pose_estimation['matches'],
                  orb['points'] >> pose_estimation['test_2d'],
                  train['points'] >> pose_estimation['train_2d'],
                  orb['points3d'] >> pose_estimation['test_3d'],
                  train['points3d'] >> pose_estimation['train_3d'],
                  ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [pose_estimation['matches'] >> match_drawer['matches'],
                      pose_estimation['matches_mask'] >> match_drawer['matches_mask'],
                      orb['points'] >> match_drawer['test'],
                      train['points'] >> match_drawer['train'],
                      self.rgb_image[:] >> match_drawer['test_image'],
                      train['image'] >> match_drawer['train_image'],
                      match_drawer['output'] >> imshow(name='Matches')['']
                      ]

        tr = self.tr
        fps = self.fps
        pose_draw = PoseDrawer()
        graph += [train['R', 'T'] >> tr['R1', 'T1'],
                  pose_estimation['R', 'T'] >> tr['R2', 'T2'],
                  tr['R', 'T'] >> pose_draw['R', 'T'],
                  pose_estimation['found'] >> pose_draw['trigger'],
                  self.K[:] >> pose_draw['K'],
                  self.rgb_image[:] >> pose_draw['image'],
                  pose_draw['output'] >> fps[:],
                  ]
        return graph
Example #30
    def connections(self, _p):
        train = self.train
        orb = self.orb
        graph = [ self.gray_image[:] >> orb['image'],
                  self.points3d[:] >> orb['points3d'],
                  self.depth_mask[:] >> orb['mask']
                ]

        matcher = Matcher()
#        if self.use_lsh:
#           matcher = self.lsh
        graph += [ orb['descriptors'] >> matcher['test'],
                   train['descriptors'] >> matcher['train'],
                  ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [matcher['matches'] >> pose_estimation['matches'],
                  orb['points'] >> pose_estimation['test_2d'],
                  train['points'] >> pose_estimation['train_2d'],
                  orb['points3d'] >> pose_estimation['test_3d'],
                  train['points3d'] >> pose_estimation['train_3d'],
                  ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [pose_estimation['matches'] >> match_drawer['matches'],
                      pose_estimation['matches_mask'] >> match_drawer['matches_mask'],
                      orb['points'] >> match_drawer['test'],
                      train['points'] >> match_drawer['train'],
                      self.rgb_image[:] >> match_drawer['test_image'],
                      train['image'] >> match_drawer['train_image'],
                      match_drawer['output'] >> imshow(name='Matches')['']
                      ]

        tr = self.tr
        fps = self.fps
        pose_draw = PoseDrawer()
        graph += [train['R', 'T'] >> tr['R1', 'T1'],
                  pose_estimation['R', 'T'] >> tr['R2', 'T2'],
                  tr['R', 'T'] >> pose_draw['R', 'T'],
                  pose_estimation['found'] >> pose_draw['trigger'],
                  self.K[:] >> pose_draw['K'],
                  self.rgb_image[:] >> pose_draw['image'],
                  pose_draw['output'] >> fps[:],
                  ]
        return graph
Example #31
def make_graph(k, sigma, debug):

    video = highgui.VideoCapture(video_device=0)
    g = imgproc.GaussianBlur(sigma=sigma)
    graph = []
    graph += [video['image'] >> g['input']]
    for x in range(k):
        nextg = imgproc.GaussianBlur(sigma=sigma)
        graph += [g['out'] >> nextg['input']]
        g = nextg
    fps = highgui.FPSDrawer()
    graph += [
        g['out'] >> fps[:],
        fps[:] >> highgui.imshow("megagauss", name="megagauss", waitKey=10)[:]
    ]
    return graph
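A sketch of running this graph; with k=5 a frame passes through six chained Gaussian blurs before the FPS overlay and display:

plasm = ecto.Plasm()
plasm.connect(make_graph(k=5, sigma=2.0, debug=False))
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()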
Example #32
    def connections(self):
        # LSH matcher, somewhat sensitive to parameters
        #        matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1, key_size=6, radius=55)
        # this is a brute force matcher
        matcher = Matcher()
        graph = [
            self.desc[:] >> matcher['test'],
            self.desc_ref[:] >> matcher['train'],
        ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [
            matcher['matches'] >> pose_estimation['matches'],
            self.points[:] >> pose_estimation['test_2d'],
            self.points_ref[:] >> pose_estimation['train_2d'],
            self.points3d[:] >> pose_estimation['test_3d'],
            self.points3d_ref[:] >> pose_estimation['train_3d'],
            self.K[:] >> pose_estimation['K']
        ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [
                pose_estimation['matches'] >> match_drawer['matches'],
                pose_estimation['matches_mask'] >>
                match_drawer['matches_mask'],
                self.points[:] >> match_drawer['test'],
                self.points_ref[:] >> match_drawer['train'],
                self.image[:] >> match_drawer['test_image'],
                self.image_ref[:] >> match_drawer['train_image'],
                match_drawer['output'] >> imshow(name='Matches')['']
            ]

        tr = self.tr
        fps = self.fps
        # pose_draw = PoseDrawer()
        # graph += [train['R', 'T'] >> tr['R1', 'T1'],
        #           pose_estimation['R', 'T'] >> tr['R2', 'T2'],
        #           tr['R', 'T'] >> pose_draw['R', 'T'],
        #           pose_estimation['found'] >> pose_draw['trigger'],
        #           self.K[:] >> pose_draw['K'],
        #           self.rgb_image[:] >> pose_draw['image'],
        #           pose_draw['output'] >> fps[:],
        #           ]
        return graph
Example #33
    def connections(self):
        # LSH matcher, somewhat sensitive to parameters
#        matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1, key_size=6, radius=55)
        # this is a brute force matcher
        matcher = Matcher()
        graph = [ self.desc[:] >> matcher['test'],
                  self.desc_ref[:] >> matcher['train'],
                  ]

        #3d estimation
        pose_estimation = self.pose_estimation
        graph += [matcher['matches'] >> pose_estimation['matches'],
                  self.points[:] >> pose_estimation['test_2d'],
                  self.points_ref[:] >> pose_estimation['train_2d'],
                  self.points3d[:] >> pose_estimation['test_3d'],
                  self.points3d_ref[:] >> pose_estimation['train_3d'],
                  self.K[:] >> pose_estimation['K']
                  ]

        if self.show_matches:
            #display matches
            match_drawer = DrawMatches()
            graph += [pose_estimation['matches'] >> match_drawer['matches'],
                      pose_estimation['matches_mask'] >> match_drawer['matches_mask'],
                      self.points[:] >> match_drawer['test'],
                      self.points_ref[:] >> match_drawer['train'],
                      self.image[:] >> match_drawer['test_image'],
                      self.image_ref[:] >> match_drawer['train_image'],
                      match_drawer['output'] >> imshow(name='Matches')['']
                      ]

        tr = self.tr
        fps = self.fps
        # pose_draw = PoseDrawer()
        # graph += [train['R', 'T'] >> tr['R1', 'T1'],
        #           pose_estimation['R', 'T'] >> tr['R2', 'T2'],
        #           tr['R', 'T'] >> pose_draw['R', 'T'],
        #           pose_estimation['found'] >> pose_draw['trigger'],
        #           self.K[:] >> pose_draw['K'],
        #           self.rgb_image[:] >> pose_draw['image'],
        #           pose_draw['output'] >> fps[:],
        #           ]
        return graph
Example #34
 def __init__(self, plasm, rows, cols, pattern_type, square_size, debug=True):
     ecto.BlackBox.__init__(self, plasm)
     self.video_cap = ecto.Passthrough('Image Input')
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
     self.circle_detector = calib.PatternDetector('Dot Detector',
                                             rows=rows, cols=cols, 
                                             pattern_type=pattern_type,
                                             square_size=square_size)
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.camera_info = calib.CameraIntrinsics('Camera Info', 
                                               camera_file="camera.yml")
     self.debug = debug
     if self.debug:
         self.fps = highgui.FPSDrawer()
         self.circle_drawer = calib.PatternDrawer('Circle Draw',
                                                  rows=rows, cols=cols)
         self.circle_display = highgui.imshow('Pattern show',
                                              name='Pattern', 
                                              waitKey=2, autoSize=True)
         self.pose_draw = calib.PoseDrawer('Pose Draw')
Example #35
def do_ecto():
    # ecto options
    parser = argparse.ArgumentParser(
        description='Publish images from directory.')
    scheduler_options(parser)
    args = parser.parse_args()

    #this will read all images in the path
    path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
    images = ImageReader(path=os.path.expanduser(path))
    mat2image = ecto_ros.Mat2Image(encoding='rgb8')

    pub_rgb = ImagePub("image_pub", topic_name='/camera/rgb/image_raw')
    display = imshow(name='image', waitKey=5000)

    plasm = ecto.Plasm()
    plasm.connect(
        images['image'] >> mat2image['image'],
        images['image'] >> display['image'],
        mat2image['image'] >> pub_rgb['input'],
    )

    ecto.view_plasm(plasm)
    run_plasm(args, plasm, locals=vars())
Example #36
debug = True

plasm = ecto.Plasm()

video = highgui.VideoCapture(video_device=0)
rgb2gray = imgproc.cvtColor(flag=7)
gaussian = imgproc.GaussianBlur(sigma=2.0)
circle_drawer = calib.CircleDrawer()
circle_drawer2 = calib.CircleDrawer()
pong = calib.PingPongDetector(dp=2,
                              maxRadius=500,
                              minRadius=1,
                              param1=200,
                              param2=100,
                              minDist=20)

print pong.__doc__
show_circles = highgui.imshow("show circles", name="Circles", waitKey=10)

plasm.connect(video["image"] >> (rgb2gray["input"], circle_drawer["image"]),
              rgb2gray["out"] >> gaussian["input"],
              gaussian["out"] >> pong["image"],
              pong["circles"] >> circle_drawer["circles"],
              circle_drawer["image"] >> show_circles["input"])
if debug:
    print plasm.viz()
    ecto.view_plasm(plasm)

sched = ecto.schedulers.Threadpool(plasm)
sched.execute(nthreads=8)
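# The dp/minDist/param1/param2/minRadius/maxRadius parameters above mirror
# OpenCV's Hough circle transform, which PingPongDetector appears to wrap.
# A minimal standalone sketch of the same detection with plain OpenCV -- an
# illustration under that assumption, not the cell's actual implementation
# ('ball.png' is a hypothetical input image):
import cv2

img = cv2.imread('ball.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (0, 0), 2.0)   # sigma=2.0, as in the graph above
# on OpenCV 2.x the method constant is cv2.cv.CV_HOUGH_GRADIENT
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=2, minDist=20,
                           param1=200, param2=100, minRadius=1, maxRadius=500)
if circles is not None:
    for x, y, r in circles[0]:
        cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
cv2.imshow('Circles', img)
cv2.waitKey(0)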
Exemple #37
0
#!/bin/python
import ecto
#import ecto_opencv.cv_bp as opencv
from ecto_opencv import highgui, calib, imgproc, line_mod, cv_bp as cv
import os

debug = True

plasm = ecto.Plasm()  #Constructor for Plasm

raw_image = highgui.imshow(name="raw image", waitKey=-1, autoSize=True)
highgui_db_color = highgui.imshow(name="db_color", waitKey=-1, autoSize=True)

bin_color = line_mod.ColorMod(gsize=5, gsig=2.0)
db_color = line_mod.ColorDebug()
coded_color = highgui.imshow(name="coded_color", waitKey=-1, autoSize=True)
#templ_calc = line_mod.ColorTemplCalc(skipx = 5,skipy=5,hist_type=1)
#train = line_mod.TrainColorTempl(acceptance_threshold=0.999,threshold=0.95)
#test = line_mod.TestColorTempl(filename="test_tree.txt")
testimage = line_mod.TestColorImage(filename="test_tree.txt",
                                    template_size=cv.Size(45, 120),
                                    sample_skipx=5,
                                    sample_skipy=5,
                                    match_thresh=0.985,
                                    search_skipx=5,
                                    search_skipy=5,
                                    hist_type=1,
                                    overlap_thresh=.75)
#IdGen = line_mod.IdGenerator(object_id="Odwalla");
DrawDetections = line_mod.DrawDetections()
Exemple #38
0
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer

video_cap = VideoCapture(video_device=0, width=640, height=480)
fps = FPSDrawer()

plasm = ecto.Plasm()
plasm.connect(video_cap['image'] >> fps['image'],
              fps['image'] >> imshow(name='video_cap')['image'],
              )

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Capture a video from the device and display it.')
Exemple #39
0
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import imshow, NiConverter, FPSDrawer
from ecto_opencv.imgproc import ConvertTo
from ecto_opencv.cv_bp import CV_8UC1
from ecto_openni import OpenNICapture, DEPTH_RGB, DEPTH_IR
device = 0
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=False)
next_mode = DEPTH_IR
fps = FPSDrawer('fps')
conversion = ConvertTo(cv_type=CV_8UC1, alpha=0.5) # scale the IR so that we can visualize it.
plasm = ecto.Plasm()
plasm.insert(capture)
plasm.connect(capture['image'] >> fps[:],
              fps[:] >> imshow('image display', name='image')[:],
              capture['depth'] >> imshow('depth display', name='depth')[:],
              capture['ir'] >> conversion[:],
              conversion[:] >> imshow('IR display', name='IR')[:],
              )

sched = ecto.schedulers.Singlethreaded(plasm)
while True:
    sched.execute(niter=100)
    #swap the stream modes for the next run
    next_mode, capture.params.stream_mode = capture.params.stream_mode, next_mode

Exemple #40
0
import ecto
from ecto_opencv import highgui, calib, imgproc

plasm = ecto.Plasm()
video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.RGB2GRAY)

detect = calib.PatternDetector(rows=5, cols=3,
                               pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                               square_size=0.04)

draw = calib.PatternDrawer(rows=5, cols=3)

pose_calc = calib.FiducialPoseFinder('Pose Calc')
pose_draw = calib.PoseDrawer('Pose Draw')
camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

imshow = highgui.imshow(name='Pattern', waitKey=2)

plasm.connect(
    video_cap['image'] >> rgb2gray['input'],
    rgb2gray['out'] >> detect['input'],
    detect['out', 'found'] >> draw['points', 'found'],
    video_cap['image'] >> draw['input'],
    camera_info['K'] >> (pose_calc['K'], pose_draw['K']),
    detect['out', 'ideal', 'found'] >> pose_calc['points', 'ideal', 'found'],
    pose_calc['R', 'T'] >> pose_draw['R', 'T'],
    draw['out'] >> pose_draw['image'],
    pose_draw['output'] >> fps['image'],
    fps['image'] >> imshow['input']
    )

if __name__ == "__main__":
    # assumed completion: the scraped snippet ends here; run like the sibling examples
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
Exemple #41
0
#!/usr/bin/env python
#
# Simple vid cap
#
import ecto
from ecto_opencv import highgui, calib, imgproc

plasm = ecto.Plasm()
video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()

video_display = highgui.imshow('imshow',
                               name='video_cap', waitKey=2)

saver = highgui.VideoWriter("saver", video_file='ecto.avi', fps=15)

plasm.connect(video_cap['image'] >> fps['image'],
              fps['image'] >> video_display['input'],
              fps['image'] >> saver['image'],
              )

if __name__ == '__main__':
    ecto.view_plasm(plasm)
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

Exemple #42
0
#!/bin/python
import ecto
#import ecto_opencv.cv_bp as opencv
from ecto_opencv import highgui, calib, imgproc, line_mod

debug = True

plasm = ecto.Plasm()  #Constructor for Plasm
video = highgui.VideoCapture(video_device=1)
bin_color = line_mod.ColorMod(thresh_bw=20)
db_color = line_mod.ColorMod()
coded_color = highgui.imshow(name="coded_color", waitKey=10, autoSize=True)
raw_image = highgui.imshow(name="raw image", waitKey= -1, autoSize=True)
highgui_db_color = highgui.imshow(name="db_color", waitKey= -1, autoSize=True)


if debug:
    ecto.print_module_doc(video)
    ecto.print_module_doc(coded_color)

plasm.connect(video, "image", bin_color, "image")
plasm.connect(video, "image", raw_image, "input")
plasm.connect(bin_color, "output", coded_color, "input")
plasm.connect(bin_color, "output", db_color, "input")
plasm.connect(db_color, "output", highgui_db_color, "input")

if debug:
    ecto.view_plasm(plasm)

thresh_gt = 10
bin_color.params.thresh_gt = thresh_gt
Exemple #43
0
#ecto options
scheduler_options(parser.add_argument_group('Scheduler Options'))
args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

source = create_source(
    package_name='image_pipeline',
    source_type='OpenNISource')  #optionally pass keyword args here...

depth_saver = ecto.If(
    'depth saver',
    cell=ImageSaver(
        filename_format=os.path.join(args.output, 'depth%04d.png')))
image_saver = ecto.If(
    'rgb saver',
    cell=ImageSaver(
        filename_format=os.path.join(args.output, 'image%04d.png')))

display = imshow(name='RGB', triggers=dict(save=ord('s')))

plasm = ecto.Plasm()
plasm.connect(
    source['image'] >> (display['image'], image_saver['image']),
    source['depth'] >> (imshow(name='Depth')['image'], depth_saver['image']),
    display['save'] >> (image_saver['__test__'], depth_saver['__test__']))

run_plasm(args, plasm, locals=vars())
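# The graph above gates both savers with ecto.If: the wrapped cell only runs
# when its __test__ input is true, and imshow raises its 'save' output while
# the bound trigger key is pressed. A minimal self-contained sketch of the
# same pattern (the VideoCapture source and filename pattern are assumptions):
import ecto
from ecto_opencv.highgui import VideoCapture, ImageSaver, imshow

video = VideoCapture(video_device=0)
display = imshow(name='preview', triggers=dict(save=ord('s')))
snap = ecto.If('snapshot saver',
               cell=ImageSaver(filename_format='snap%04d.png'))

plasm = ecto.Plasm()
plasm.connect(video['image'] >> (display['image'], snap['image']),
              display['save'] >> snap['__test__'])

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()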
Exemple #44
0
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired=72,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag; will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    # try several parameter combinations
    source = create_source('image_pipeline', 'OpenNISource', outputs_list=['K', 'K', 'camera', 'image',
                                                                           'depth', 'points3d',
                                                                           'mask_depth'], res=res, fps=fps)

    # convert the image to grayscale
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [source['image'] >> rgb2gray[:] ]

    # Find planes
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    graph += [ # find the normals
                source['K', 'points3d'] >> compute_normals['K', 'points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                source['K', 'points3d'] >> plane_est['K', 'points3d'] ]

    if orb_template:
        # find the pose using ORB
        poser = OrbPoseEstimator(directory=orb_template, show_matches=orb_matches)
        graph += [ source['image', 'K', 'mask_depth', 'points3d'] >> poser['color_image', 'K', 'mask', 'points3d'],
                   rgb2gray[:] >> poser['image']
                 ]
    else:
        # get a pose using the dot pattern: there might be a scale ambiguity, as this is 3d only
        poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                         pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                         square_size=0.04, debug=True)
        graph += [ source['image', 'K'] >> poser['color_image', 'K'],
                   rgb2gray[:] >> poser['image'] ]

    # filter the previous pose and resolve the scale ambiguity using 3d
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ source['K'] >> pose_filter['K'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]

    # draw the found pose
    pose_drawer = calib.PoseDrawer('Pose Draw')
    display = highgui.imshow(name='Poses')
    graph += [ pose_filter['found'] >> pose_drawer['trigger'],
               poser['debug_image'] >> pose_drawer['image'],
               source['K'] >> pose_drawer['K'],
               pose_filter['R', 'T'] >> pose_drawer['R', 'T'],
               pose_drawer['output'] >> display[:] ]

    delta_pose = ecto.If('delta R|T',
                         cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                                 n_desired=n_desired))

    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]

    # publish the source data
    rgbMsg = Mat2Image(frame_id='/camera_rgb_optical_frame', swap_rgb=True)
    depthMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ source['depth'] >> depthMsg[:],
               source['image'] >> rgbMsg[:] ]

    # mask out the object
    masker = segmentation_cell
    graph += [ source['points3d'] >> masker['points3d'],
               plane_est['masks', 'planes'] >> masker['masks', 'planes'],
               pose_filter['T'] >> masker['T'] ]

    # publish the mask
    maskMsg = Mat2Image(frame_id='/camera_rgb_optical_frame')
    graph += [ masker['mask'] >> maskMsg[:] ]

    camera2cv = CameraModelToCv()
    cameraMsg = Cv2CameraInfo(frame_id='/camera_rgb_optical_frame')
    graph += [source['camera'] >> camera2cv['camera'],
              camera2cv['K', 'D', 'image_size'] >> cameraMsg['K', 'D', 'image_size']
              ]
    #display the mask
    mask_display = object_recognition_capture.MaskDisplay()
    mask_display_highgui = highgui.imshow(name='mask')
    graph += [
              masker['mask'] >> mask_display['mask'],
              source['image'] >> mask_display['image'],
              mask_display['image'] >> mask_display_highgui['image'],
            ]
    if not preview:
        baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                       depth=ImageBagger(topic_name='/camera/depth/image'),
                       mask=ImageBagger(topic_name='/camera/mask'),
                       pose=PoseBagger(topic_name='/camera/pose'),
                       image_ci=CameraInfoBagger(topic_name='/camera/rgb/camera_info'),
                       depth_ci=CameraInfoBagger(topic_name='/camera/depth/camera_info'),
                       )
        bagwriter = ecto.If('Bag Writer if R|T',
                            cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                            )

        graph += [
                  rgbMsg[:] >> bagwriter['image'],
                  depthMsg[:] >> bagwriter['depth'],
                  cameraMsg[:] >> (bagwriter['image_ci'], bagwriter['depth_ci']),
                  poseMsg['pose'] >> bagwriter['pose'],
                  maskMsg[:] >> bagwriter['mask'],
                  ]
        novel = delta_pose['novel']
        if use_turn_table:
            table = TurnTable(angle_thresh=angle_thresh)
            ander = ecto.And()
            graph += [
                  table['trigger'] >> (delta_pose['__test__'], ander['in2']),
                  delta_pose['novel'] >> ander['in1'],
                  ]
            novel = ander['out']
        else:
            delta_pose.inputs.__test__ = True

        graph += [novel >> (bagwriter['__test__'])]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
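# A minimal sketch of driving create_capture_plasm; 'my_segmentation' is a
# hypothetical cell exposing the points3d/masks/planes/T inputs and the 'mask'
# output that the masker stage above expects, and the bag name is likewise an
# illustration.
import math

plasm, segmentation = create_capture_plasm(bag_name='capture.bag',
                                           angle_thresh=math.radians(10),
                                           segmentation_cell=my_segmentation,
                                           preview=True)
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()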
Exemple #45
0
    info2cv = ecto_ros.CameraInfo2Cv("Info to CV")
    to_pose_stamped = ecto_ros.RT2PoseStamped("To Pose Stamped",frame_id="openni_rgb_optical_frame")
    
    #pose estimation
    rows = 5
    cols = 3
    square_size = 0.04
    pattern_type = calib.ASYMMETRIC_CIRCLES_GRID
    circle_detector = calib.PatternDetector(rows=rows, cols=cols,
                                        pattern_type=pattern_type,
                                        square_size=square_size)
    poser = calib.FiducialPoseFinder()
    offset = ecto_corrector.TranslationOffset("Offset",tx=-0.079,ty=0.120,tz=0.000)

    #visualization
    circle_drawer = calib.PatternDrawer(rows=rows, cols=cols)
    pose_drawer = calib.PoseDrawer()
    show_triggers = {'c_key':ord('c')}
    show = highgui.imshow("imshow",waitKey=10,triggers=show_triggers)
    
    #bagging
    baggers = dict(image=ecto_sensor_msgs.Bagger_Image(topic_name='image_color'),
                   info=ecto_sensor_msgs.Bagger_CameraInfo(topic_name='camera_info'),
                   cloud=ecto_sensor_msgs.Bagger_PointCloud2(topic_name="points"),
                   pose=ecto_geometry_msgs.Bagger_PoseStamped(topic_name="pose"),
                   )
    bagwriterif = ecto.If('Bag Writer if key',
                        cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name)
                        )

    plasm = ecto.Plasm()
    plasm.connect(
        #conversions
        sync["image"]  >>  img2mat[:],
Exemple #46
0
import os
import math
import subprocess

import couchdb

import ecto
from ecto_opencv import calib, highgui, imgproc
import object_recognition
from object_recognition import tools as dbtools, models, capture
from ecto_object_recognition import reconstruction, conversion
import ecto_pcl
from tempfile import NamedTemporaryFile

cloud_view = ecto_pcl.CloudViewer("Cloud Display", window_name="PCD Viewer")[:]
imshows = dict(image=highgui.imshow('Image Display', name='image')[:],
               mask=highgui.imshow('Mask Display', name='mask')[:],
               depth=highgui.imshow('Depth Display', name='depth')[:])


def generate_pointclouds_in_object_space(dbs, session, args):
    object_name = dbs[session.object_id]['object_name']
    if not os.path.exists(object_name):
        os.mkdir(object_name)
    obs_ids = models.find_all_observations_for_session(dbs, session.id)
    if len(obs_ids) == 0:
        raise RuntimeError("There are no observations available.")
    db_reader = capture.ObservationReader(
        'Database Source', db_params=dbtools.args_to_db_params(args))

    #observation dealer will deal out each observation id.
Exemple #47
0
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer
from ecto_opencv.imgproc import Canny, RGB2GRAY, cvtColor

video_cap = VideoCapture(video_device=0, width=640, height=480)
fps = FPSDrawer()
canny = Canny()
gray = cvtColor(flag=RGB2GRAY)
plasm = ecto.Plasm()
plasm.connect(
    video_cap['image'] >> gray[:],
    gray[:] >> canny['image'],
    canny['image'] >> fps['image'],
    fps['image'] >> imshow(name='Canny Image')['image'],
    video_cap['image'] >> imshow(name='Original Image')['image'],
)

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Capture a video from the device, run Canny edge detection, and display the result.')
Exemple #48
0
square_size = 0.025  #2.5 cm
pattern_type = CHESSBOARD

rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)

detector = PatternDetector(rows=rows,
                           cols=cols,
                           pattern_type=pattern_type,
                           square_size=square_size)

pattern_drawer = PatternDrawer(rows=rows, cols=cols)
camera_intrinsics = CameraIntrinsics(camera_file=args.camera)
poser = FiducialPoseFinder()
pose_drawer = PoseDrawer()
plasm = ecto.Plasm()
plasm.connect(
    image_reader['image'] >> (pattern_drawer['input'], rgb2gray['image']),
    rgb2gray['image'] >> detector['input'],
    detector['ideal', 'out', 'found'] >> poser['ideal', 'points', 'found'],
    camera_intrinsics['K'] >> poser['K'],
    detector['out', 'found'] >> pattern_drawer['points', 'found'],
    poser['R', 'T'] >> pose_drawer['R', 'T'],
    poser['R'] >> MatPrinter(name='R')['mat'],
    poser['T'] >> MatPrinter(name='T')['mat'],
    pattern_drawer['out'] >> pose_drawer['image'],
    camera_intrinsics['K'] >> pose_drawer['K'],
    pose_drawer['output'] >> imshow(name='Pose', waitKey=0)['image'],
)

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()
Exemple #49
0
plasm = ecto.Plasm()
sched = ecto.schedulers.Singlethreaded(plasm)

video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()

rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)

circle_detector = calib.PatternDetector(rows=7,
                                        cols=3,
                                        pattern_type="acircles",
                                        square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show',
                                name='Pattern',
                                waitKey=2,
                                maximize=True)

pose_calc = calib.FiducialPoseFinder('Pose Calc')

pose_draw = calib.PoseDrawer('Pose Draw')

camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

plasm.connect(
    video_cap['image'] >> circle_drawer['input'],
    circle_drawer['out'] >> pose_draw['image'],
    pose_draw['output'] >> fps['image'],
    fps['image'] >> circle_display['input'],
    video_cap['image'] >> rgb2gray['input'],
    rgb2gray['out'] >> circle_detector['input'],
Exemple #50
0
#!/bin/python
import ecto
from ecto_opencv import imgproc, highgui, features2d
import time
#import orb as imgproc

debug = False
#debug = True

plasm = ecto.Plasm()

video = highgui.VideoCapture(video_device=0)
orb_m = features2d.ORB(n_features=2500)
draw_kpts = features2d.DrawKeypoints()
imshow = highgui.imshow(name="ORB", waitKey=2, autoSize=True)
rgb2gray = imgproc.cvtColor(flag=7)

plasm.connect(video, "image", rgb2gray, "input")
plasm.connect(rgb2gray, "out", orb_m, "image")
plasm.connect(orb_m, "kpts", draw_kpts, "kpts")
plasm.connect(video, "image", draw_kpts, "input")
plasm.connect(draw_kpts, "output", imshow, "input")

if debug:
    print plasm.viz()
    ecto.view_plasm(plasm)

prev = time.time()
count = 0

sched = ecto.schedulers.Singlethreaded(plasm)
while (imshow.outputs.out != 27):
    # assumed loop body (the scraped snippet ends here): run one iteration
    # at a time and report the frame rate every five seconds
    sched.execute(niter=1)
    count += 1
    now = time.time()
    if now - prev > 5:
        print "%.1f fps" % (count / (now - prev))
        prev = now
        count = 0
Exemple #51
0
from ecto_openni import OpenNICapture, DEPTH_RGB, DEPTH_IR, RGB, IR, IRGamma
from image_pipeline import StereoCalibration, Points2DAccumulator, Points3DAccumulator

from pattern_helpers import *

# assumed imports: ecto core, plus ImageReader/imshow from ecto_opencv.highgui
import ecto
from ecto_opencv.highgui import ImageReader, imshow

create_detector_drawer = create_detector_drawer_chessboard

ir_reader = ImageReader(path='ir')
rgb_reader = ImageReader(path='rgb')

plasm = ecto.Plasm()

ir_detector, ir_drawer = create_detector_drawer()
rgb_detector, rgb_drawer = create_detector_drawer()

rgb_display = imshow(name="RGB Points")

plasm.connect(ir_reader['image'] >> (ir_detector[:], ir_drawer['input']),
              rgb_reader['image'] >> (rgb_detector[:], rgb_drawer['input']),
              ir_detector['found', 'out'] >> ir_drawer['found', 'points'],
              rgb_detector['found', 'out'] >> rgb_drawer['found', 'points'],
              rgb_drawer[:] >> rgb_display[:],
              ir_drawer[:] >> imshow(name='Depth points')[:])

#triggers
ander = ecto.And(ninput=2)
plasm.connect(
    rgb_detector['found'] >> ander['in1'],
    ir_detector['found'] >> ander['in2'],
)
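# Design note: ecto.And only raises 'out' when both detectors report 'found',
# so a downstream accumulator or saver wrapped in ecto.If (via its __test__
# input) would collect only frames where the pattern is visible in both the
# IR and RGB images.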
Exemple #52
0
scheduler_options(parser.add_argument_group('Scheduler Options'))
args = parser.parse_args()

if not os.path.exists(args.output):
    os.makedirs(args.output)

#camera = VideoCapture(video_device=1,width=int(args.width),height=int(args.height))
source = create_source(
    package_name='image_pipeline',
    source_type='OpenNISource')  #optionally pass keyword args here...
depth_saver = ImageSaver(
    filename_format=os.path.join(args.output, 'frame_%010d_depth.png'))
image_saver = ImageSaver(
    filename_format=os.path.join(args.output, 'frame_%010d_image.png'))
#mono_saver = ImageSaver(filename_format=os.path.join(args.output, 'frame_%010d_mono.png'))

plasm = ecto.Plasm()
plasm.connect(
    #      camera['image'] >> imshow(name='Mono')['image'],
    source['depth'] >> imshow(name='Depth')['image'],
    source['image'] >> imshow(name='RGB')['image'])

if not args.preview:
    plasm.connect(
        source['image'] >> image_saver['image'],
        source['depth'] >> depth_saver['image'],
        #        camera['image'] >> mono_saver['image'],
    )

run_plasm(args, plasm, locals=vars())
Exemple #53
0
#!/usr/bin/env python
import ecto
from ecto_opencv.highgui import VideoCapture, imshow, FPSDrawer
from ecto_opencv.imgproc import Scale, Interpolation

video_cap = VideoCapture(video_device=0, width=640, height=480)
fps = FPSDrawer()
factor = 0.1
scale_down = Scale(factor=factor, interpolation=Interpolation.AREA)
scale_up = Scale(factor=1 / factor, interpolation=Interpolation.LANCZOS4)

plasm = ecto.Plasm()
plasm.connect(
    video_cap["image"] >> scale_down["image"],
    scale_down["image"] >> scale_up["image"],
    scale_up["image"] >> fps["image"],
    fps["image"] >> imshow(name="Rescaled")["image"],
)

if __name__ == "__main__":
    from ecto.opts import doit

    doit(plasm, description="Capture a video from the device and display it.", locals=vars())
Exemple #54
0
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(
        description='Computes the odometry between frames.')
    scheduler_options(parser.add_argument_group('Scheduler'))
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    video = VideoCapture(video_device=0)
    lucid = LUCID()
    draw_kpts = DrawKeypoints()
    rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)
    fps = FPSDrawer()

    plasm = ecto.Plasm()
    plasm.connect(
        video['image'] >> rgb2gray['image'],
        rgb2gray['image'] >> lucid['image'],
        lucid['keypoints'] >> draw_kpts['keypoints'],
        video['image'] >> draw_kpts['image'],
        draw_kpts['image'] >> fps[:],
        fps[:] >> imshow('eLUCID display', name='LUCID')['image'],
    )

    run_plasm(options, plasm, locals=vars())
Exemple #55
0
debug = False  # assumed default; referenced below but its definition was cut off
device = Capture('ni device', rgb_resolution=ResolutionMode.VGA_RES,
           depth_resolution=ResolutionMode.VGA_RES,
           rgb_fps=30, depth_fps=30,
           device_number=0,
           registration=True,
           synchronize=False,
           device=Device.KINECT
           )
cloud_generator = NiConverter('cloud_generator')
graph = [device[:] >> cloud_generator[:]]

#detection
edge_detector = DepthEdgeDetector("Edge Detector", depth_threshold=0.02,
                                  erode_size=3, open_size=3)
graph += [cloud_generator[:] >> edge_detector["input"], ]

#drawing
im_drawer = imshow("Drawer",name="depth edges", waitKey=10)
graph += [edge_detector[:] >> im_drawer[:]]

plasm = ecto.Plasm()
plasm.connect(graph)

if __name__ == "__main__":

    if debug:
        ecto.view_plasm(plasm)

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()
Exemple #56
0
from image_pipeline import Rectifier, StereoModelLoader, DepthRegister, CameraModelToCv, CV_INTER_NN
from ecto_openni import OpenNICapture, DEPTH_RGB, DEPTH_IR, RGB, IR, IRGamma, enumerate_devices
from ecto_object_recognition.conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB
# assumed imports for cells used below but missing from the scraped snippet
import ecto
from ecto_opencv.highgui import imshow
from ecto_opencv.calib import DepthTo3d

openni_reg = True
print enumerate_devices()

capture = OpenNICapture('Camera',
                        stream_mode=DEPTH_RGB,
                        registration=openni_reg,
                        latched=False)
depthTo3d = DepthTo3d('Depth ~> 3d')
to_xyzrgb = MatToPointCloudXYZRGB('OpenCV ~> PCL')
pcl_cloud = PointCloudT2PointCloud('conversion', format=XYZRGB)
cloud_viewer = CloudViewer('Cloud Display')

plasm = ecto.Plasm()
plasm.connect(capture['K'] >> depthTo3d['K'],
              capture['image'] >> imshow('Image Display')[:],
              capture['depth'] >> depthTo3d['depth'],
              depthTo3d['points3d'] >> to_xyzrgb['points'],
              capture['image'] >> to_xyzrgb['image'],
              to_xyzrgb[:] >> pcl_cloud[:], pcl_cloud[:] >> cloud_viewer[:])

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm,
         description='Capture Kinect depth and RGB and register them.',
         locals=vars())
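# Design note: registration=True asks OpenNI to re-project the depth map into
# the RGB camera's frame, so each depth pixel lines up with the colour pixel
# that MatToPointCloudXYZRGB pairs it with; with registration off the cloud's
# colours would be offset from its geometry.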
Exemple #57
0
            depth_ci=CameraInfoSub(topic_name='/camera/depth_registered/camera_info', queue_size=0),
            )

sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
camera_info = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
poser = OpposingDotPoseEstimator(plasm,
                                 rows=5, cols=3,
                                 pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                 square_size=0.04, debug=True)

bgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.Conversion.RGB2BGR)
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
display = highgui.imshow('Poses', name='Poses', waitKey=5, autoSize=True)

graph = [sync['image'] >> im2mat_rgb[:],
          im2mat_rgb[:] >> (rgb2gray[:], bgr2rgb[:]),
          bgr2rgb[:] >> poser['color_image'],
          rgb2gray[:] >> poser['image'],
          poser['debug_image'] >> display['input'],
          sync['image_ci'] >> camera_info['camera_info'],
          camera_info['K'] >> poser['K'],
          ]
plasm.connect(graph)

if __name__ == '__main__':
    import sys
    ecto_ros.init(sys.argv, "opposing_dots_pose", False)
    from ecto.opts import doit
Exemple #58
0
                                     debug=debug)
    camera_info = calib.CameraIntrinsics('Camera Info',
                                         camera_file="camera.yml")
    subs = dict(
        image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0),
        depth=ImageSub(topic_name='camera/depth/image', queue_size=0),
    )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    bgr2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)
    gray2rgb = imgproc.cvtColor('gray -> rgb', flag=8)
    print gray2rgb.__doc__

    display = highgui.imshow('Poses', name='Poses', waitKey=2, autoSize=True)
    mask_display = highgui.imshow('Masks',
                                  name='Masks',
                                  waitKey=-1,
                                  autoSize=True)
    object_display = highgui.imshow('Object',
                                    name='Object',
                                    waitKey=-1,
                                    autoSize=True)

    imsaver = highgui.ImageSaver('pose image saver', filename='image_pose_')
    rawsaver = highgui.ImageSaver('raw image saver', filename='image_raw_')
    masker = calib.PlanarSegmentation(z_min=0.015, y_crop=0.15, x_crop=0.15)
    bitwise_and = imgproc.BitwiseAnd()

    im2mat_rgb = ecto_ros.Image2Mat()
Exemple #59
0
import ecto
from ecto_X import Sink
from ecto_opencv.highgui import imshow, FPSDrawer

video = Sink(url='localhost', port=2932)
fps = FPSDrawer()
video_display = imshow(name='video_cap', waitKey=0)

plasm = ecto.Plasm()
plasm.connect(video[:] >> fps['image'],
              fps['image'] >> video_display['input'],
              )

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, description='Receive a video stream over the network and display it.')