Example #1
    def __init__(self, plasm):
        """
        sinks: a list of the sinks to use
        """
        ecto.BlackBox.__init__(self, plasm)

        self._object_ids_splitter = ecto.Passthrough()
        self._Rs_splitter = ecto.Passthrough()
        self._Ts_splitter = ecto.Passthrough()

        # try to import ecto_ros
        self._do_use_ros = True
        try:
            __import__('ecto_ros')
        except ImportError:
            self._do_use_ros = False

        if self._do_use_ros:
            import ecto_ros
            ecto_ros.init(sys.argv, "ecto_node")
            self._image_message_splitter = ecto.Passthrough()

        # add the different possible outputs
        self._cell_factory = {}
        self._cells = []
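
This example (and Example #16 below) guards the ecto_ros import so the pipeline still builds when ROS is absent. The same guard can be factored into a small standalone helper; the sketch below is hypothetical (the helper name is made up) and assumes only ImportError needs to be caught:

import sys

def try_init_ros(node_name="ecto_node"):
    """Hypothetical helper: initialize ecto_ros only if it is importable."""
    try:
        import ecto_ros
    except ImportError:
        return False
    ecto_ros.init(sys.argv, node_name)
    return True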
Example #2
def do_ecto(bagname, msg_counts, Scheduler):
    ecto_ros.init(sys.argv, "image_sub_node")
    subs = dict(
                image=ImageSub(topic_name='/camera/rgb/image_color', queue_size=0),
                depth=ImageSub(topic_name='/camera/depth/image', queue_size=0),
                depth_info=CameraInfoSub(topic_name='/camera/depth/camera_info', queue_size=0),
                image_info=CameraInfoSub(topic_name='/camera/rgb/camera_info', queue_size=0),
             )
    
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)
    counter_rgb = ecto.Counter()
    counter_depth = ecto.Counter()
    counter_rgb_info = ecto.Counter()
    counter_depth_info = ecto.Counter()

    graph = [
                sync["image"] >> counter_rgb[:],
                sync["depth"] >> counter_depth[:],
                sync["image_info"] >> counter_rgb_info[:],
                sync["depth_info"] >> counter_depth_info[:],
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=3, rate=1)
    wait_bag(rosbag)
    time.sleep(1)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
Example #3
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")
        
        parser = argparse.ArgumentParser(description='My awesome program thing.')
        
        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        cropper = Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.0, z_max=1.0)
        ror = RadiusOutlierRemoval("ror", min_neighbors=1, search_radius=1.0)

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/cloud_filtered')

        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> voxel_grid[:],
                      voxel_grid[:] >> ror[:],
                      ror[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])
    
        run_plasm(options, plasm, locals=vars())
Example #4
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                         orb_template='', res=SXGA_RES, fps=FPS_30,
                         orb_matches=False,
                         preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []
    
# Create the ROS node, subscribing to the OpenNI topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
# Convert the ROS messages to cv types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
                                                        
    
    # display the incoming color image

    display = highgui.imshow(name='Poses')
    graph += [image2cv[:] >> display[:],
            ]
    
    # convert the image to grayscale                                                                      
#    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
#    graph += [image2cv[:]  >> rgb2gray[:] ]
    
    # get a pose using the dot pattern: there might be a scale ambiguity as this is 3D only
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               image2cv[:] >> poser['image'] ] 
    
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
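
Example #4 only assembles and returns the plasm. A minimal sketch of how the returned plasm might be executed with one of ecto's schedulers follows; every argument value is an illustrative placeholder, and the call is assumed to run in the same module as the definition above (so the SXGA_RES/FPS_30 defaults resolve):

import ecto

# Hypothetical driver for create_capture_plasm(); bag name, angle threshold,
# and iteration count are placeholders, not values from the original project.
plasm, segmentation = create_capture_plasm('capture.bag', angle_thresh=0.17,
                                           segmentation_cell=None, n_desired=36)
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter=10)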
Example #5
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        parser = argparse.ArgumentParser(
            description='My awesome program thing.')

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        cropper = Cropper("cropper",
                          x_min=-0.25,
                          x_max=0.25,
                          y_min=-0.25,
                          y_max=0.25,
                          z_min=0.0,
                          z_max=1.0)

        convex_hull = ConvexHull("convex_hull")
        ror = RadiusOutlierRemoval("ror", min_neighbors=1, search_radius=1.0)
        sor = StatisticalOutlierRemoval("sor", mean_k=1)

        extract_indices = ExtractIndices("extract_indices", negative=False)
        extract_clusters = EuclideanClusterExtraction("extract_clusters",
                                                      min_cluster_size=50,
                                                      cluster_tolerance=0.005)
        extract_largest_cluster = ExtractLargestCluster(
            "extract_largest_cluster")

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='/ecto_pcl/sample_output')

        plasm.connect(
            cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> extract_clusters[:],
            extract_clusters[:] >> extract_largest_cluster["clusters"],
            msg2cloud[:] >> extract_largest_cluster["input"],
            extract_largest_cluster[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:])

        #        plasm.connect(cloud_sub[:] >> msg2cloud[:],
        #                      msg2cloud[:] >> voxel_grid[:],
        #                      msg2cloud[:] >> extract_indices["input"],
        #                      voxel_grid[:] >> cropper[:],
        #                      cropper[:] >> extract_clusters[:],
        #                      extract_indices[:] >> extract_clusters[:],
        #                      extract_clusters[:] >> extract_largest_cluster["clusters"],
        #                      cropper[:] >>  extract_largest_cluster["input"],
        #                      extract_largest_cluster[:] >> cloud2msg[:],
        #                      cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
Example #6
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        parser = argparse.ArgumentParser(description='Ecto Largest Cluster')

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.015)
        cropper = Cropper("cropper",
                          x_min=-0.25,
                          x_max=0.25,
                          y_min=-0.25,
                          y_max=0.25,
                          z_min=0.0,
                          z_max=1.0)
        extract_clusters = EuclideanClusterExtraction("extract_clusters",
                                                      min_cluster_size=50,
                                                      cluster_tolerance=0.02)
        extract_largest_cluster = ExtractLargestCluster(
            "extract_largest_cluster")
        nan_filter = PassThrough("nan_removal")
        colorize = ColorizeClusters("colorize", max_clusters=100)
        extract_indices = ExtractIndices("extract_indices", negative=False)

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='/ecto_pcl/cloud_filtered')

        output = ecto_pcl.XYZRGB

        #        plasm.connect(cloud_sub[:] >>  msg2cloud[:],
        #                      msg2cloud[:] >> nan_filter[:],
        #                      nan_filter[:] >> voxel_grid[:],
        #                      voxel_grid[:] >> extract_clusters[:],
        #                      extract_clusters[:] >> extract_largest_cluster["clusters"],
        #                      voxel_grid[:] >> extract_largest_cluster["input"],
        #                      extract_largest_cluster[:] >> cloud2msg[:],
        #                      cloud2msg[:] >> cloud_pub[:])
        #
        #        run_plasm(options, plasm, locals=vars())

        plasm.connect(
            cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> nan_filter[:],
            nan_filter[:] >> voxel_grid[:],
            voxel_grid[:] >> extract_clusters[:],
            extract_clusters[:] >> extract_largest_cluster["clusters"],
            voxel_grid[:] >> extract_largest_cluster["input"],
            extract_largest_cluster[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
Example #7
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        parser = argparse.ArgumentParser(
            description='My awesome program thing.')

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        passthru_z = PassThrough("passthru_z",
                                 filter_field_name="z",
                                 filter_limit_min=0.0,
                                 filter_limit_max=1.0)
        passthru_x = PassThrough("passthru_x",
                                 filter_field_name="x",
                                 filter_limit_min=-0.25,
                                 filter_limit_max=0.25)
        passthru_y = PassThrough("passthru_y",
                                 filter_field_name="y",
                                 filter_limit_min=-0.25,
                                 filter_limit_max=0.25)
        cropper = Cropper("cropper",
                          x_min=-0.25,
                          x_max=0.25,
                          y_min=-0.25,
                          y_max=0.25,
                          z_min=0.0,
                          z_max=1.0)
        viewer = CloudViewer("viewer", window_name="Clouds!")

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='/ecto_pcl/sample_output')

        plasm.connect(
            cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> voxel_grid[:],
            voxel_grid[:] >> passthru_z[:], passthru_z[:] >> passthru_x[:],
            passthru_x[:] >> passthru_y[:], passthru_y[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:])

        #        plasm.connect(cloud_sub[:] >> msg2cloud[:],
        #                      msg2cloud[:] >> voxel_grid[:],
        #                      voxel_grid[:] >> cropper[:],
        #                      cropper[:] >> cloud2msg[:],
        #                      cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
Example #8
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        parser = argparse.ArgumentParser(
            description='Ecto Euclidean Clustering')

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.015)
        cropper = Cropper("cropper",
                          x_min=-0.25,
                          x_max=0.25,
                          y_min=-0.25,
                          y_max=0.25,
                          z_min=0.3,
                          z_max=1.0)
        passthru_z = PassThrough("passthru_z",
                                 filter_field_name="z",
                                 filter_limit_min=0.3,
                                 filter_limit_max=1.0)
        passthru_x = PassThrough("passthru_x",
                                 filter_field_name="x",
                                 filter_limit_min=-0.25,
                                 filter_limit_max=0.25)
        passthru_y = PassThrough("passthru_y",
                                 filter_field_name="y",
                                 filter_limit_min=-0.25,
                                 filter_limit_max=0.25)
        extract_clusters = EuclideanClusterExtraction("extract_clusters",
                                                      min_cluster_size=50,
                                                      cluster_tolerance=0.02)
        nan_filter = PassThrough("nan_removal")
        colorize = ColorizeClusters("colorize", max_clusters=100)

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='/ecto_pcl/cloud_filtered')

        plasm.connect(
            cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> nan_filter[:],
            nan_filter[:] >> voxel_grid[:], voxel_grid[:] >> cropper[:],
            cropper["output"] >> extract_clusters["input"],
            extract_clusters[:] >> colorize["clusters"],
            cropper[:] >> colorize["input"], colorize[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
def init_ros():
    """
    Function that should be called before starting any cell depending on ROS
    """
    # check if roscore is running
    proc = subprocess.Popen(['rostopic', 'list'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _out, err = proc.communicate()
    if err:
        print('roscore not started: start it or your ROS cell will error out at runtime')
    else:
        ecto_ros.init(sys.argv, DEFAULT_NODE_NAME, False)
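
The init_ros() helper above is meant to run before any ROS-backed cell is constructed, so a caller invokes it first and only then builds subscribers. A minimal sketch of that ordering, assuming init_ros() and its module-level imports (subprocess, sys, ecto_ros, DEFAULT_NODE_NAME) are available as above; the driver itself is hypothetical:

import ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs

init_ros()  # check/initialize ROS before any ROS cell is created
cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
    "cloud_sub", topic_name='/camera/depth_registered/points')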
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")

        # Create the argument parser        
        parser = argparse.ArgumentParser(description='Ecto Euclidean Clustering')
        
        # Use the parser to create scheduler options        
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        # A VoxelGrid cell
        voxel_grid = ecto_pcl.VoxelGrid("voxel_grid", leaf_size=0.01)
        
        # A Cropper cell
        cropper = ecto_pcl.Cropper("cropper", x_min=-0.35, x_max=0.35, y_min=-0.35, y_max=0.35, z_min=0.3, z_max=1.5)
        
        # A EuclideanClusterExtraction cell
        extract_clusters = ecto_pcl.EuclideanClusterExtraction("extract_clusters", min_cluster_size=50, cluster_tolerance=0.02)
        
        # A cell to filter NaN values from the point cloud
        nan_filter = ecto_pcl.PassThrough("nan_removal")
        
        # A cell to colorize the clusters
        colorize = ecto_pcl.ColorizeClusters("colorize", max_clusters=100)
 
        # Create the subscriber cell with the appropriate point cloud topic     
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='camera/depth_registered/points')

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)

        # A cell to convert a Ecto/PCL point cloud back to a point cloud message        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        
        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='ecto_pcl/cloud_filtered')
        
        # Create the plasm by connecting the cells               
        plasm.connect(cloud_sub[:] >>  msg2cloud[:],
                      msg2cloud[:] >> nan_filter[:],
                      nan_filter[:] >> voxel_grid[:],
                      voxel_grid[:] >> cropper[:],
                      cropper[:] >> extract_clusters[:],
                      extract_clusters[:] >> colorize["clusters"],
                      cropper[:] >> colorize["input"],
                      colorize[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])
        
        # Run the plasm
        run_plasm(options, plasm, locals=vars())
Example #11
def init_ros():
    """
    Function that should be called before starting any cell depending on ROS
    """
    # check if roscore is running
    proc = subprocess.Popen(['rostopic', 'list'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    _out, err = proc.communicate()
    if err:
        print(
            'roscore not started: start it or your ROS cell will error out at runtime'
        )
    else:
        ecto_ros.init(sys.argv, DEFAULT_NODE_NAME, False)
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")
        
        parser = argparse.ArgumentParser(description='Ecto Largest Cluster')
        
        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.015)
        cropper = Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.0, z_max=1.0)
        extract_clusters = EuclideanClusterExtraction("extract_clusters", min_cluster_size=50, cluster_tolerance=0.02)
        extract_largest_cluster = ExtractLargestCluster("extract_largest_cluster")
        nan_filter = PassThrough("nan_removal")
        colorize = ColorizeClusters("colorize", max_clusters=100)
        extract_indices = ExtractIndices("extract_indices", negative=False)
      
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/cloud_filtered')
        
        output = ecto_pcl.XYZRGB
        
#        plasm.connect(cloud_sub[:] >>  msg2cloud[:],
#                      msg2cloud[:] >> nan_filter[:],
#                      nan_filter[:] >> voxel_grid[:],
#                      voxel_grid[:] >> extract_clusters[:],
#                      extract_clusters[:] >> extract_largest_cluster["clusters"],
#                      voxel_grid[:] >> extract_largest_cluster["input"],
#                      extract_largest_cluster[:] >> cloud2msg[:],
#                      cloud2msg[:] >> cloud_pub[:])
#        
#        run_plasm(options, plasm, locals=vars())

        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> nan_filter[:],
                      nan_filter[:] >> voxel_grid[:],
                      voxel_grid[:] >> extract_clusters[:],
                      extract_clusters[:] >> extract_largest_cluster["clusters"],
                      voxel_grid[:] >> extract_largest_cluster["input"],
                      extract_largest_cluster[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
Example #13
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        # Create the argument parser
        parser = argparse.ArgumentParser(description="Ecto PCL Demo")

        # Use the parser to create scheduler options
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        # Create three PassThrough cells along the x, y and z dimensions
        passthru_z = ecto_pcl.PassThrough(
            "passthru_z", filter_field_name="z", filter_limit_min=0.0, filter_limit_max=1.0
        )
        passthru_x = ecto_pcl.PassThrough(
            "passthru_x", filter_field_name="x", filter_limit_min=-0.25, filter_limit_max=0.25
        )
        passthru_y = ecto_pcl.PassThrough(
            "passthru_y", filter_field_name="y", filter_limit_min=-0.25, filter_limit_max=0.25
        )

        # Create the subscriber cell with the appropriate point cloud topic
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name="camera/depth_registered/points")

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

        # A publisher cell with the desired topic name
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub", topic_name="ecto_pcl/cloud_filtered")

        # Create the plasm by connecting the cells
        plasm.connect(
            cloud_sub[:] >> msg2cloud[:],
            msg2cloud[:] >> passthru_z[:],
            passthru_z[:] >> passthru_x[:],
            passthru_x[:] >> passthru_y[:],
            passthru_y[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:],
        )
        # Run the plasm
        run_plasm(options, plasm, locals=vars())
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")
        
        parser = argparse.ArgumentParser(description='My awesome program thing.')
        
        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        cropper = Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.0, z_max=1.0)
        
        convex_hull = ConvexHull("convex_hull")
        ror = RadiusOutlierRemoval("ror", min_neighbors=1, search_radius=1.0)
        sor = StatisticalOutlierRemoval("sor", mean_k=1)

        extract_indices = ExtractIndices("extract_indices", negative=False)
        extract_clusters = EuclideanClusterExtraction("extract_clusters", min_cluster_size=50, cluster_tolerance=0.005)
        extract_largest_cluster = ExtractLargestCluster("extract_largest_cluster")

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/sample_output')

        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> extract_clusters[:],
                      extract_clusters[:] >> extract_largest_cluster["clusters"],
                      msg2cloud[:] >> extract_largest_cluster["input"],
                      extract_largest_cluster[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])
        
#        plasm.connect(cloud_sub[:] >> msg2cloud[:],
#                      msg2cloud[:] >> voxel_grid[:],
#                      msg2cloud[:] >> extract_indices["input"],
#                      voxel_grid[:] >> cropper[:],
#                      cropper[:] >> extract_clusters[:],
#                      extract_indices[:] >> extract_clusters[:],
#                      extract_clusters[:] >> extract_largest_cluster["clusters"],
#                      cropper[:] >>  extract_largest_cluster["input"],
#                      extract_largest_cluster[:] >> cloud2msg[:],
#                      cloud2msg[:] >> cloud_pub[:])
        
        run_plasm(options, plasm, locals=vars())
Example #15
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        # Create the argument parser
        parser = argparse.ArgumentParser(description='Ecto PCL Cropper Demo')

        # Use the parser to create scheduler options
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        # Create the Cropper cell with the desired dimensions (in meters)
        cropper = ecto_pcl.Cropper("cropper",
                                   x_min=-0.25,
                                   x_max=0.25,
                                   y_min=-0.25,
                                   y_max=0.25,
                                   z_min=0.0,
                                   z_max=1.0)

        # Create the subscriber cell with the appropriate point cloud topic
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='camera/depth_registered/points')

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

        # A publisher cell with the desired topic name
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='ecto_pcl/cloud_filtered')

        # Create the plasm by connecting the cells
        plasm.connect(cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> cropper[:],
                      cropper[:] >> cloud2msg[:], cloud2msg[:] >> cloud_pub[:])

        # Run the plasm
        run_plasm(options, plasm, locals=vars())
Example #16
    def __init__(self, plasm):
        """
        sinks: a list of the sinks to use
        """
        ecto.BlackBox.__init__(self, plasm)

        # try to import ecto_ros
        self._do_use_ros = True
        try:
            __import__('ecto_ros')
        except ImportError:
            self._do_use_ros = False
        if self._do_use_ros:
            import ecto_ros
            ecto_ros.init(sys.argv, "ecto_node")

        # add the different possible outputs
        self._cell_factory = {}
        self._cells = []
        self._outputs = {}
Example #17
def do_ecto(bagname, msg_counts, Scheduler):
    ecto_ros.init(sys.argv, "image_sub_node")
    sub_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_color', queue_size=0)
    im2mat_rgb = ecto_ros.Image2Mat()
    counter_rgb = ecto.Counter()
    graph = [
                sub_rgb["output"] >> im2mat_rgb["image"],
                im2mat_rgb[:] >> counter_rgb[:],
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=0.5)
    wait_bag(rosbag)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_rgb.outputs.count
    assert counter_rgb.outputs.count != 0
Example #18
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        parser = argparse.ArgumentParser(description="My awesome program thing.")

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        passthru_z = PassThrough("passthru_z", filter_field_name="z", filter_limit_min=0.0, filter_limit_max=1.0)
        passthru_x = PassThrough("passthru_x", filter_field_name="x", filter_limit_min=-0.25, filter_limit_max=0.25)
        passthru_y = PassThrough("passthru_y", filter_field_name="y", filter_limit_min=-0.25, filter_limit_max=0.25)
        cropper = Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.0, z_max=1.0)
        viewer = CloudViewer("viewer", window_name="Clouds!")

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name="/camera/depth_registered/points")
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub", topic_name="/ecto_pcl/sample_output")

        plasm.connect(
            cloud_sub[:] >> msg2cloud[:],
            msg2cloud[:] >> voxel_grid[:],
            voxel_grid[:] >> passthru_z[:],
            passthru_z[:] >> passthru_x[:],
            passthru_x[:] >> passthru_y[:],
            passthru_y[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:],
        )

        #        plasm.connect(cloud_sub[:] >> msg2cloud[:],
        #                      msg2cloud[:] >> voxel_grid[:],
        #                      voxel_grid[:] >> cropper[:],
        #                      cropper[:] >> cloud2msg[:],
        #                      cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
Example #19
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")

        # Create the argument parser
        parser = argparse.ArgumentParser(description='Ecto PCL Demo')

        # Use the parser to create scheduler options        
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()
        
        # Create three PassThrough cells along the x, y and z dimensions
        passthru_z = ecto_pcl.PassThrough("passthru_z", filter_field_name="z", filter_limit_min=0.0, filter_limit_max=1.0)
        passthru_x = ecto_pcl.PassThrough("passthru_x", filter_field_name="x", filter_limit_min=-0.25, filter_limit_max=0.25)
        passthru_y = ecto_pcl.PassThrough("passthru_y", filter_field_name="y", filter_limit_min=-0.25, filter_limit_max=0.25)
      
        # Create the subscriber cell with the appropriate point cloud topic
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='camera/depth_registered/points')
        
        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        
        # A publisher cell with the desired topic name
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='ecto_pcl/cloud_filtered')

        # Create the plasm by connecting the cells               
        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> passthru_z[:],
                      passthru_z[:] >> passthru_x[:],
                      passthru_x[:] >> passthru_y[:],
                      passthru_y[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])
        # Run the plasm
        run_plasm(options, plasm, locals=vars())
Example #20
def do_ecto(bagname, msg_counts, Scheduler):
    ecto_ros.init(sys.argv, "image_sub_node")
    sub_rgb = ImageSub("image_sub",
                       topic_name='/camera/rgb/image_color',
                       queue_size=0)
    im2mat_rgb = ecto_ros.Image2Mat()
    counter_rgb = ecto.Counter()
    graph = [
        sub_rgb["output"] >> im2mat_rgb["image"],
        im2mat_rgb[:] >> counter_rgb[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=0.5)
    wait_bag(rosbag)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_rgb.outputs.count
    assert counter_rgb.outputs.count != 0
Example #21
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")

        parser = argparse.ArgumentParser(description='Ecto Euclidean Clustering')

        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()

        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.015)
        cropper = Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.3, z_max=1.0)
        passthru_z = PassThrough("passthru_z", filter_field_name="z", filter_limit_min=0.3, filter_limit_max=1.0)
        passthru_x = PassThrough("passthru_x", filter_field_name="x", filter_limit_min=-0.25, filter_limit_max=0.25)
        passthru_y = PassThrough("passthru_y", filter_field_name="y", filter_limit_min=-0.25, filter_limit_max=0.25)
        extract_clusters = EuclideanClusterExtraction("extract_clusters", min_cluster_size=50, cluster_tolerance=0.02)
        nan_filter = PassThrough("nan_removal")
        colorize = ColorizeClusters("colorize", max_clusters=100)

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)

        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/cloud_filtered')

        plasm.connect(cloud_sub[:] >>  msg2cloud[:],
                      msg2cloud[:] >> nan_filter[:],
                      nan_filter[:] >> voxel_grid[:],
                      voxel_grid[:] >> cropper[:],
                      cropper["output"] >> extract_clusters["input"],
                      extract_clusters[:] >> colorize["clusters"],
                      cropper[:] >> colorize["input"],
                      colorize[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])

        run_plasm(options, plasm, locals=vars())
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")
        
        # Create the argument parser
        parser = argparse.ArgumentParser(description='Ecto PCL Cropper Demo')
        
        # Use the parser to create scheduler options
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        # Create the Cropper cell with the desired dimensions (in meters)
        cropper = ecto_pcl.Cropper("cropper", x_min=-0.25, x_max=0.25, y_min=-0.25, y_max=0.25, z_min=0.0, z_max=1.0)
      
        # Create the subscriber cell with the appropriate point cloud topic
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='camera/depth_registered/points')
        
        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object                
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        
        # A publisher cell with the desired topic name
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='ecto_pcl/cloud_filtered')

        # Create the plasm by connecting the cells
        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> cropper[:],
                      cropper[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:])
        
        # Run the plasm
        run_plasm(options, plasm, locals=vars())
Example #23
# abstract the input.
import ecto
from ecto_opencv import highgui, calib, imgproc
from fiducial_pose_est import *
#lil bit of ros
PKG = 'ecto_ros'  # this package name
import roslib
roslib.load_manifest(PKG)
import ecto_ros, ecto_sensor_msgs, ecto_geometry_msgs
import sys

ImageSub = ecto_sensor_msgs.Subscriber_Image
CameraInfoSub = ecto_sensor_msgs.Subscriber_CameraInfo

if "__main__" == __name__:
    ecto_ros.init(sys.argv, "pose_estimator")

    plasm = ecto.Plasm()
    sched = ecto.schedulers.Singlethreaded(plasm)

    debug = True
    poser = OpposingDotPoseEstimator(plasm,
                                     rows=5,
                                     cols=3,
                                     pattern_type="acircles",
                                     square_size=0.04,
                                     debug=debug)
    camera_info = calib.CameraIntrinsics('Camera Info',
                                         camera_file="camera.yml")
    subs = dict(
        image=ImageSub(topic_name='camera/rgb/image_color', queue_size=0),
Example #24
File: testing.py  Project: chenbk85/mmod
    # add arguments for the source and sink
    Sink.add_arguments(parser)

    params, args, pipeline_params, do_display, db_params, db = read_arguments(parser, argv[1:])

    model_ids = []
    object_ids = set()
    for object_id in params['object_ids']:
        for model_id in models.find_model_for_object(db, object_id, 'MMOD'):
            model_ids.append(str(model_id))
            object_ids.add(object_id)
    model_documents = DbDocuments(db_params, model_ids)

    # TODO handle this properly...
    ecto_ros.init(argv, "mmod_testing", False)#not anonymous.

    source = Source.parse_arguments(params['source'])


    sink = Sink.parse_arguments(args, db, db_params, object_ids)

    #hook up the tester
    mmod_tester = MModTester(thresh_match=0.93,color_filter_thresh=0.8,skip_x=8,skip_y=8, model_documents=model_documents)
    
    # Connect the detector to the source
    pyr_img = mmod.Pyramid(n_levels=3)
    pyr_depth = mmod.Pyramid(n_levels=3)
    for key in source.outputs.iterkeys():
        if key in mmod_tester.inputs.keys():
            if key == 'image':
Example #25
#!/usr/bin/env python
"""
This file is meant to be used with test_client.py: it starts a recognition server for 10 seconds
"""
from object_recognition_core.utils.training_detection_args import read_arguments_from_string
from object_recognition_ros.server import RecognitionServer
import rospy
import sys
import ecto_ros
import roslib

if __name__ == '__main__':
    ecto_ros.init([], "test_server_ecto_ros", False)

    rospy.init_node('test_server')
    ork_params = read_arguments_from_string("""
    sink:
      type: 'Publisher'
      module: 'object_recognition_ros.io'
    pipeline:
      type: ConstantDetector
      module: object_recognition_core.ecto_cells.pipelines
      outputs: [sink]
    """)

    server = RecognitionServer(ork_params)

    # timeout for the test
    rospy.sleep(10.0)
Example #26
#!/usr/bin/env python

"""
This sample shows how to interact with ROS cloud subscribers and publishers.
"""

import sys
import ecto
import ecto_pcl
import ecto_ros
import ecto_pcl_ros
import ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs

plasm = ecto.Plasm()

cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)

cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/sample_output')

plasm.connect(cloud_sub['output'] >> msg2cloud[:],
              msg2cloud[:] >> cloud2msg[:],
              cloud2msg[:] >> cloud_pub[:])


if __name__ == "__main__":
    from ecto.opts import doit
    ecto_ros.init(sys.argv,"ecto_pcl_rossample")
    ecto.view_plasm(plasm)
    doit(plasm, description='Read a pcd through ROS.')
Example #27
                    normals[:] >> segment["normals"],
                    # project inliers, find convex hull
                    voxgrid[:] >> project["input"],
                    segment["model"] >> project["model"],
                    project[:] >> conhull[:],
                    # extract stuff on table from original high-res cloud
                    grabber[:] >> prism["input"],
                    conhull[:] >> prism["planar_hull"],
                    prism[:] >> extract["indices"],
                    grabber[:] >> extract["input"],
                    #convert output to ros msg and pub.
                    extract[:] >> clusters[:],
                    clusters[:] >> largest["clusters"],
                    extract[:] >> largest["input"],
                    largest[:] >> pcl2msg[:],
                    pcl2msg[:] >> pub[:]
                  )
    
    sched = ecto.schedulers.Threadpool(plasm)
    #sched.execute()
    sched.execute_async()

    from IPython.Shell import IPShellEmbed
    ipshell = IPShellEmbed()
    ipshell()

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "extract_largest_cluster")
    do_ecto()

Example #28
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_tracker")
        
        parser = argparse.ArgumentParser(description='Ecto Tracker.')
        
        scheduler_options(parser)
        options = parser.parse_args()

        plasm = ecto.Plasm()
                
        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        passthru_z = PassThrough("passthru_z", filter_field_name="z", filter_limit_min=0.2, filter_limit_max=1.0)
        passthru_x = PassThrough("passthru_x", filter_field_name="x", filter_limit_min=-0.5, filter_limit_max=0.5)
        passthru_y = PassThrough("passthru_y", filter_field_name="y", filter_limit_min=-0.5, filter_limit_max=0.5)
        nan_filter = PassThrough("nan_removal")
        cropper = Cropper("cropper", x_min=-0.5, x_max=0.5, y_min=-0.5, y_max=0.5, z_min=0.0, z_max=1.0)
        #convex_hull = ConvexHull("convex_hull")
#        
        extract_stuff = ExtractPolygonalPrismData("extract_stuff", height_min=0.01, height_max=0.2)
        extract_indices = ExtractIndices("extract_indices", negative=False)
        extract_clusters = EuclideanClusterExtraction("extract_clusters", min_cluster_size=50, cluster_tolerance=0.005)
        extract_largest_cluster = ExtractLargestCluster("extract_largest_cluster")
        colorize = ColorizeClusters("colorize", max_clusters=3)
        merge = MergeClouds("merge")
        viewer = CloudViewer("viewer", window_name="Clouds!")

#        
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='/ecto_pcl/sample_output')
        
#        plasm.connect(cloud_sub["output"] >> msg2cloud[:],
#                      msg2cloud[:] >> voxel_grid[:],
##                      voxel_grid[:] >> extract_indices[:],
#                      voxel_grid[:] >> cloud2msg[:],  
#                      cloud2msg[:] >> cloud_pub[:],)
#
##        plasm += [voxel_grid['output'] >> extract_clusters[:],
##                  extract_clusters[:] >> colorize["clusters"],
##                  cloud_generator[:] >> merge["input"],
##          colorize[:] >> merge["input2"],
##          colorize[:] >> viewer[:]
##          ]
#        
#

        plasm.connect(cloud_sub[:] >> msg2cloud[:],
                      msg2cloud[:] >> voxel_grid[:],
                      voxel_grid[:] >> cropper[:],
                      cropper[:] >> extract_clusters[:],
                      msg2cloud[:] >> extract_indices["input"],
                      extract_clusters[:] >> colorize["clusters"],
                      extract_indices[:] >> colorize["input"],
                      msg2cloud[:] >> merge["input"],
                      colorize[:] >> merge["input2"],
                      merge[:] >> cloud2msg[:],
                      cloud2msg[:] >> cloud_pub[:],)

#        plasm.connect(cloud_sub["output"] >> msg2cloud[:],
#                      msg2cloud[:] >> voxel_grid[:],
#                      #voxel_grid[:] >> nan_filter[:],
#                      #nan_filter[:] >> extract_indices["indices"],
#                      voxel_grid[:] >> extract_indices["indices"],
#                      msg2cloud[:] >> extract_indices["input"],)
#                      #extract_indices[:] >> extract_clusters[:],)
##                      extract_clusters[:] >> colorize["clusters"],
##                      extract_indices[:] >> colorize["input"],
##                      msg2cloud[:] >> merge["input"],
##                      colorize[:] >> merge["input2"],
##                      colorize[:] >> viewer[:],)

#        plasm.connect(cloud_sub["output"] >> msg2cloud[:],
#                      msg2cloud[:] >> voxel_grid["input"],
#                      voxel_grid["output"] >> passthru_z["input"],
#                      passthru_z["output"] >> passthru_x["input"],
#                      passthru_x["output"] >> passthru_y["input"],
#                      passthru_y["output"] >> extract_indices["input"],
#                      passthru_y["output"] >> extract_clusters["input"],
#                      extract_clusters[:] >> colorize["clusters"],
#                      extract_indices[:] >> colorize["input"],
#                      msg2cloud[:] >> merge["input"],
#                      colorize[:] >> merge["input2"],
#                      colorize[:] >> viewer[:],)
#
#                      #passthru_y["output"] >> cloud2msg[:],
#                      #cloud2msg[:] >> cloud_pub[:],)
        
        
        run_plasm(options, plasm, locals=vars())
        #sched = ecto.Scheduler(plasm)
        #sched.execute_async()

        time.sleep(10)
#!/usr/bin/env python

import ecto, ecto_pcl, ecto_ros, ecto_pcl_ros
import ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs
from ecto.opts import scheduler_options, run_plasm
import sys
import time
import os
import argparse

import cloud_treatment_ecto.cloud_treatment as cloud_treatment

ecto_ros.init(sys.argv, "locate_ladder")

parser = argparse.ArgumentParser(description='Treatment that segments the '
                                 'ladder steps from the rest of the scene')
scheduler_options(parser)

parser.add_argument('-p', '--pcdfile',
                    default='ladder_test_5.pcd', help='The pcdfile to input')

options = parser.parse_args()
pcdfilename = options.pcdfile
plasm = ecto.Plasm()

reader = cloud_treatment.PCDReaderCell("Reader_ecto",
                                       filename=pcdfilename)

cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub",
    topic_name='/worldmodel_main/pointcloud_vis')
msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
Example #30
pcdfile = os.path.join(os.path.dirname(__file__), 'test.pcd')

if len(sys.argv) > 1:
    pcdfile = sys.argv[1]

reader = ecto_pcl.PCDReader("Reader", filename=pcdfile)

filters = hirop_perception.RegionFilters("tester", maxZ=1)

fusion = hirop_perception.PclFusion()

viewer = ecto_pcl.CloudViewer("viewer", window_name="PCD Viewer")

saver = ecto_pcl.PCDWriter("Saver")

source = hirop_perception.PointCloudRos('source1',
                                        topic_name="/camera/depth/points",
                                        world_frame='base_link')

plasm.connect(source["output"] >> fusion["input"], source["T"] >> fusion["T"],
              source["R"] >> fusion["R"], fusion["output"] >> viewer["input"])

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "ros_test")
    sched = ecto.Scheduler(plasm)
    sched.execute(niter=1)
    # sleep between the two single-iteration runs, then exit
    time.sleep(20)
    sched.execute(niter=1)
    time.sleep(30)
Example #31
    pcd_msg2depth_msg = ecto_ros.PointCloud22DepthImage()
    plasm = ecto.Plasm()
    plasm.connect([
        bagreader['info'] >> bagwriter['info_rgb'],
        bagreader['info'] >> bagwriter['info_depth'],
        bagreader['rgb'] >> bagwriter['rgb'],
        bagreader['depth'] >> pcd_msg2depth_msg['cloud'],
        pcd_msg2depth_msg['image'] >> bagwriter['depth']
    ])
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
    return tmp_bag_path


if __name__ == '__main__':
    ecto_ros.init(sys.argv, "ecto_node")

    parser = ArgumentParser()
    parser.add_argument('--folder',
                        dest='folder',
                        help='The Folder where all the NIST bags are located',
                        required=True)
    add_db_arguments(parser)
    args = parser.parse_args()

    # process all the bags in the folder
    for file_name in os.listdir(args.folder):
        if not file_name.endswith('.bag'):
            continue
        # persist the bag to the DB
        object_index = int(file_name[7:9])
sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

im2mat_rgb = ecto_ros.Image2Mat('rgb -> cv::Mat')
camera_info = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
poser = OpposingDotPoseEstimator(plasm,
                                 rows=5, cols=3,
                                 pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                 square_size=0.04, debug=True)

bgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.Conversion.RGB2BGR)
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
display = highgui.imshow('Poses', name='Poses', waitKey=5, autoSize=True)

graph = [sync['image'] >> im2mat_rgb[:],
          im2mat_rgb[:] >> (rgb2gray[:], bgr2rgb[:]),
          bgr2rgb[:] >> poser['color_image'],
          rgb2gray[:] >> poser['image'],
          poser['debug_image'] >> display['input'],
          sync['image_ci'] >> camera_info['camera_info'],
          camera_info['K'] >> poser['K'],
          ]
plasm.connect(graph)

if __name__ == '__main__':
    import sys
    ecto_ros.init(sys.argv, "opposing_dots_pose", False)
    from ecto.opts import doit
    doit(plasm, description='Estimate the pose of an opposing dot fiducial.')

Example #33
    def __init__(self):
        ecto_ros.init(sys.argv,"ecto_pcl_demo")

        # Create the argument parser        
        parser = argparse.ArgumentParser(description='Ecto Tabletop Segmentation')
        
        # Use the parser to create scheduler options        
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points')
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=ecto_pcl.XYZRGB)
        
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2("cloud_pub",topic_name='ecto_pcl/cloud_filtered')
        
        voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.01)
        
        graph = [cloud_sub["output"] >> msg2cloud[:],
                 msg2cloud[:] >> voxel_grid[:]
                 ]
        
        # estimate normals, segment and find convex hull
        normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
        planar_segmentation = SACSegmentationFromNormals("planar_segmentation",
                                                         model_type=SACMODEL_NORMAL_PLANE,
                                                         eps_angle=0.09, distance_threshold=0.1)
        project_inliers = ProjectInliers("project_inliers", model_type=SACMODEL_NORMAL_PLANE)
        nan_filter = PassThrough('nan_removal')
        convex_hull = ConvexHull("convex_hull")
        
        graph += [voxel_grid[:] >> normals[:],
                  voxel_grid[:] >> planar_segmentation["input"],
                  normals[:] >> planar_segmentation["normals"],
                  voxel_grid[:] >> project_inliers["input"],
                  planar_segmentation["model"] >> project_inliers["model"],
                  project_inliers[:] >> nan_filter[:],
                  nan_filter[:] >> convex_hull[:]
                  ]
        
        
        # extract stuff on table from original high-res cloud and show in viewer
        extract_stuff = ExtractPolygonalPrismData("extract_stuff", height_min=0.01, height_max=0.2)
        extract_indices = ExtractIndices("extract_indices", negative=False)
        viewer = CloudViewer("viewer", window_name="Clouds!")
        
        graph += [msg2cloud[:] >> extract_stuff["input"],
                  convex_hull[:] >> extract_stuff["planar_hull"],
                  extract_stuff[:] >> extract_indices["indices"],
                  msg2cloud[:] >> extract_indices["input"],
                  extract_indices[:] >> cloud2msg[:],
                  cloud2msg[:] >> cloud_pub[:]
                  ]
        
        plasm.connect(graph)
        
        # Run the plasm
        run_plasm(options, plasm, locals=vars())
            description='Generate observations by 3d rendering.')
        dbtools.add_db_arguments(self.parser)
        scheduler_options(self.parser)
        self.options = self.parser.parse_args()

    def do_ecto(self):

        plasm = ecto.Plasm()
        plasm.connect(self.graph)

        # sched = ecto.Scheduler(plasm)
        # sched.execute(niter=1)

        ecto.view_plasm(plasm)

        # add ecto scheduler args.
        run_plasm(self.options, plasm, locals=vars())


if __name__ == '__main__':
    ecto_ros.init(sys.argv, "observation_renderer")
    observation_renderer = ObservationRenderer()
    # observation_renderer.connect_debug()
    observation_renderer.connect_ros_pose()

    # observation_renderer.connect_ros_image()
    observation_renderer.connect_ros_point_cloud()
    observation_renderer.connect_db_inserter()

    observation_renderer.do_ecto()
Example #35
#!/usr/bin/env python
import argparse
from object_recognition.capture import create_openni_bag_capture_plasm
from ecto.opts import scheduler_options, run_plasm
import ecto_ros
import sys
parser = argparse.ArgumentParser(
    description='Capture a bag of data from the kinect.')
scheduler_options(parser)
parser.add_argument('bagname', nargs=1, type=str)
options = parser.parse_args()
plasm = create_openni_bag_capture_plasm(options.bagname[0])
ecto_ros.init(sys.argv, 'data_capture')
run_plasm(options, plasm, locals)
Example #36
  4) We then extract the indices of all points that are above the
     plane formed by the convex hull.
  5) Finally, we extract the point cloud corresponding to these
     indices, and display it.

"""

import sys
import ecto, ecto_ros, ecto_pcl_ros, ecto_pcl
import ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs
from ecto_pcl import *
from ecto.opts import doit as ecto_doit, scheduler_options, run_plasm
import argparse
import time

ecto_ros.init(sys.argv, "ecto_tracker")

parser = argparse.ArgumentParser(description='Ecto Tracker.')

scheduler_options(parser)
options = parser.parse_args()

plasm = ecto.Plasm()

cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
    "cloud_sub", topic_name='/camera/depth_registered/points')
msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                            format=ecto_pcl.XYZRGB)

cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")
cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
Example #37
    subs = dict(image=ImageSub(topic_name='image',queue_size=0),
                depth=ImageSub(topic_name='depth',queue_size=0),
                )
    #The synchronizer expects this dict, and will have an output for each key:value pair
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    #some rosmsg ~> opencv type converters
    rgb = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()
    
    #declare a graph that just shows the images using opencv highgui
    graph = [
                sync["image"] >> rgb["image"],
                sync["depth"] >> depth["image"],
                rgb["image"] >> imshow(name="rgb")["image"],
                depth["image"] >> imshow(name="depth")["image"]
            ]
    
    #add the graph to our ecto plasm
    plasm = ecto.Plasm()
    plasm.connect(graph)
    
    #We'll use a threadpool based scheduler to execute this one.
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute() #execute forever

if __name__ == "__main__":
    import sys
    ecto_ros.init(sys.argv, "ecto_node") #strips out ros remappings and initializes roscpp
    do_ecto()
Example #38
import ecto, ecto_pcl, ecto_pcl_ros
import ecto_ros, ecto_sensor_msgs
import sys

PointCloudSub = ecto_sensor_msgs.Subscriber_PointCloud2
PointCloudPub = ecto_sensor_msgs.Publisher_PointCloud2

def do_ecto():
    plasm = ecto.Plasm()

    sub = PointCloudSub("Subscriber", topic_name='/camera/rgb/points')
    
    grabber = ecto_pcl_ros.Message2PointCloud("Message2Cloud", format=ecto_pcl.XYZRGB)

    pcl2msg = ecto_pcl_ros.PointCloud2Message("Cloud2Message")

    pub = PointCloudPub("Cloud Publisher", topic_name='/ecto_pcl/pass_it_on')    

    plasm.connect(sub['output'] >> grabber[:],
                  grabber[:] >> pcl2msg[:],
                  pcl2msg[:] >> pub[:])

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "pass_it_on")
    do_ecto()

Example #39
def create_capture_plasm(bag_name,
                         angle_thresh,
                         segmentation_cell,
                         n_desired,
                         orb_template='',
                         res=SXGA_RES,
                         fps=FPS_30,
                         orb_matches=False,
                         preview=False,
                         use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.
    
    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    #   Create ROS node, subscribing to OpenNI topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2(
        "cloud_sub", topic_name='/camera/depth_registered/points')
    rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image(
        "rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image(
        "depth_image_sub", topic_name='/camera/depth_registered/image_raw')
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo(
        "rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo(
        "depth_camera_info_sub",
        topic_name='/camera/depth_registered/camera_info')

    #   Convert ROS topics to OpenCV types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()

    graph += [
        cloud_sub['output'] >> msg2cloud[:], rgb_image_sub[:] >> image2cv[:],
        depth_image_sub[:] >> depth2cv[:],
        rgb_camera_info_sub[:] >> rgb_info2cv[:],
        depth_camera_info_sub[:] >> depth_info2cv[:]
    ]

    # display the incoming color image (the window is named 'Poses')

    display = highgui.imshow(name='Poses')
    graph += [
        image2cv[:] >> display[:],
    ]

    # convert the image to grayscale
    #    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    #    graph += [image2cv[:]  >> rgb2gray[:] ]

    # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
    poser = OpposingDotPoseEstimator(
        rows=5,
        cols=3,
        pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
        square_size=0.04,
        debug=True)
    graph += [
        image2cv[:] >> poser['color_image'],
        rgb_info2cv['K'] >> poser['K_image'], image2cv[:] >> poser['image']
    ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell)  # return segmentation for tuning of parameters.
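
A minimal driver sketch for the function above, modeled on the capture scripts elsewhere in this collection. The bag filename, angle threshold and view count are placeholder values, and the module that exports create_capture_plasm is not shown in the snippet, so the call is written as if the function were already in scope:

import argparse
import math

from ecto.opts import scheduler_options, run_plasm

parser = argparse.ArgumentParser(description='Capture openni data into a bag.')
scheduler_options(parser)
options = parser.parse_args()

# create_capture_plasm already calls ecto_ros.init internally in this variant,
# so no extra ROS setup is needed here. segmentation_cell is only returned for
# later tuning, so None is enough for a smoke test.
plasm, segmentation = create_capture_plasm('capture.bag', math.pi / 36,
                                           segmentation_cell=None, n_desired=36)
run_plasm(options, plasm, locals=vars())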
Example #40
import ecto_ros, ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs
import sys

ImageSub = ecto_sensor_msgs.Subscriber_Image
ImagePub = ecto_sensor_msgs.Publisher_Image


def do_ecto():
    sub_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    sub_depth = ImageSub("depth_sub", topic_name='/camera/depth/image')
    pub_rgb = ImagePub("image_pub", topic_name='my_image')
    pub_depth = ImagePub("depth_pub", topic_name='my_depth')

    graph = [
        sub_rgb["output"] >> pub_rgb["input"],
        sub_depth["output"] >> pub_depth["input"]
    ]

    plasm = ecto.Plasm()
    plasm.connect(graph)

    ecto.view_plasm(plasm)

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()


if __name__ == "__main__":
    ecto_ros.init(sys.argv, "image_pub")
    do_ecto()
Example #41
#!/usr/bin/env python
"""
This file is meant to be used with test_server.py: it starts a recognition server and keeps it alive until ROS shuts down (an earlier variant that ran for only 5 seconds is kept commented out below)
"""
from object_recognition_core.utils.training_detection_args import read_arguments_from_string
from object_recognition_ros.server import RecognitionServer
import rospy
import sys
import ecto_ros

#if __name__ == '__main__':
#    rospy.init_node('test_server')
#    ork_params = read_arguments_from_string(open('config_detection_test.json'))
#    server = RecognitionServer(ork_params)
#    rospy.sleep(5.0)

import roslib
import rospy
from std_msgs.msg import String

if __name__ == '__main__':
    original_argv = sys.argv

    ecto_ros.init(original_argv, "server_test", False)

    rospy.init_node('test_server')
    ork_params = read_arguments_from_string(open('config_detection_test.json'))
    server = RecognitionServer(ork_params)
    while not rospy.is_shutdown():
        rospy.sleep(1.0)
Example #42
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                                            sensor, res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.
    
    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []
    
#   Create ROS node, subscribing to OpenNI topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    # openni & freenect require '/camera/rgb/image_rect_color'; openni2 just needs '/camera/rgb/image_raw'
    if sensor in ['kinect']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_color')
    elif sensor in ['xtion']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
#   Convert ROS topics to OpenCV types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
    """            
    rgb_display = highgui.imshow(name='rgb')
    depth_display = highgui.imshow(name='depth')
    graph += [image2cv[:] >> rgb_display[:],
              depth2cv[:] >> depth_display[:],
            ]
            
    #   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ msg2cloud[:] >> viewer[:] ]
    """   
#   Process pointcloud

    # Crop the cloud to the capture volume (the PassThrough limits below are in meters)
    
    cut_x = PassThrough(filter_field_name="x", 
                        filter_limit_min=-0.2,
                        filter_limit_max=0.2)
                        
    cut_y = PassThrough(filter_field_name="y", 
                        filter_limit_min=-0.5,
                        filter_limit_max=0.5)
    cut_z = PassThrough(filter_field_name="z",    
                        filter_limit_min=-0.0,
                        filter_limit_max=1.0)
                        
    graph += [ msg2cloud[:] >> cut_x[:],
               cut_x[:] >> cut_y[:],
               cut_y[:] >> cut_z[:] ]
               
    # Voxel filter
    
    voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.002)
    
    graph += [ cut_z[:] >> voxel_grid[:] ]
               
    # Find plane and delete it
    
    normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
    
    graph += [ voxel_grid[:] >> normals[:] ]
    
    plane_finder = SACSegmentationFromNormals("planar_segmentation",
                                                 model_type=SACMODEL_NORMAL_PLANE,
                                                 eps_angle=0.09, distance_threshold=0.1)
    
    plane_deleter = ExtractIndices(negative=True)
    
    graph += [ voxel_grid[:] >> plane_finder['input'],
               normals[:] >> plane_finder['normals'] ]
    graph += [ voxel_grid[:]  >> plane_deleter['input'],
               plane_finder['inliers'] >> plane_deleter['indices'] ]
#   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ plane_deleter[:] >> viewer[:] ]
    
#   Compute the VFH (Viewpoint Feature Histogram) descriptor
    
    vfh_signature = VFHEstimation(radius_search=0.01)
    
    graph += [ plane_deleter[:] >>  vfh_signature['input'],
               normals[:] >> vfh_signature['normals'] ]                                                          
    
    # convert the image to grayscale                                                                      
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [image2cv[:]  >> rgb2gray[:] ]
                
    # get a pose using the dot pattern: there might be a scale ambiguity as this is 3d only
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               rgb2gray[:] >> poser['image'] ]         #poser gives us R&T from camera to dot pattern
    
    
    points3d = calib.DepthTo3d()
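    # DepthTo3d back-projects each depth pixel into a 3-D point using the depth
    # camera intrinsics K wired in below.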
    
    graph += [ depth_info2cv['K'] >> points3d['K'],
               depth2cv['image'] >> points3d['depth']
             ]
    
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    
    graph += [ # find the normals
                depth_info2cv['K'] >> compute_normals['K'],
                points3d['points3d'] >> compute_normals['points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                depth_info2cv['K'] >> plane_est['K'],
                points3d['points3d'] >> plane_est['points3d'] ]
    

    
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ depth_info2cv['K'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]
               
    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))
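    # DeltaRT presumably flags a view as 'novel' when its pose differs from all
    # previously kept poses by more than angle_thresh, and signals 'last' once
    # n_desired novel views have been collected.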
                                                          
    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]
    
    trainer_ = Trainer()
    
    graph += [ pose_filter['R', 'T'] >> trainer_['R', 'T'],
               vfh_signature['output'] >> trainer_['features'],
               delta_pose['last'] >> trainer_['last'],
               delta_pose['novel'] >> trainer_['novel'],
               plane_deleter[:] >> trainer_['input'] ]
 
    novel = delta_pose['novel']

    cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

    graph += [ plane_deleter[:] >> cloud2msg[:] ]   
    
    baggers = dict(points=PointCloudBagger(topic_name='/camera/depth_registered/points'),
                   pose=PoseBagger(topic_name='/camera/pose')
                   )
                   
    bagwriter = ecto.If('Bag Writer if R|T', cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
    pcd_writer = ecto.If('msd pcd writer', cell = PCDWriter())
    
    graph += [    cloud2msg[:] >> bagwriter['points'],
                  poseMsg['pose'] >> bagwriter['pose']
                  ]
    graph += [ plane_deleter['output'] >> pcd_writer['input'] ]
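    # The ecto.If wrappers only execute their wrapped cell when the '__test__'
    # input is true, so the connections below make bag writing and PCD writing
    # fire only on frames whose pose DeltaRT reports as novel.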
    
    delta_pose.inputs.__test__ = True
    pcd_writer.inputs.__test__ = True
    
    graph += [novel >> (bagwriter['__test__'])]  
    graph += [novel >> (pcd_writer['__test__'])]
    
    rt2pose = RT2PoseStamped(frame_id = "camera_rgb_optical_frame")
    rt2pose_filtered = RT2PoseStamped(frame_id = "camera_rgb_optical_frame")      
    posePub = PosePublisher("pose_pub", topic_name='pattern_pose')
    posePub_filtered = PosePublisher("pose_pub_filtered", topic_name='pattern_pose_filtered')
    graph += [ poser['R', 'T'] >> rt2pose['R', 'T'],
               rt2pose['pose'] >> posePub['input'],
               pose_filter['R', 'T'] >> rt2pose_filtered['R', 'T'],
               rt2pose_filtered['pose'] >> posePub_filtered['input'] ]
   
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
Example #43
File: testing.py  Project: vikiboy/mmod
    # add arguments for the source and sink
    Sink.add_arguments(parser)

    params, args, pipeline_params, do_display, db_params, db = read_arguments(
        parser, argv[1:])

    model_ids = []
    object_ids = set()
    for object_id in params['object_ids']:
        for model_id in models.find_model_for_object(db, object_id, 'MMOD'):
            model_ids.append(str(model_id))
            object_ids.add(object_id)
    model_documents = DbDocuments(db_params, model_ids)

    # TODO handle this properly...
    ecto_ros.init(argv, "mmod_testing", False)  #not anonymous.

    source = Source.parse_arguments(params['source'])

    sink = Sink.parse_arguments(args, db, db_params, object_ids)

    #hook up the tester
    mmod_tester = MModTester(thresh_match=0.93,
                             color_filter_thresh=0.8,
                             skip_x=8,
                             skip_y=8,
                             model_documents=model_documents)

    # Connect the detector to the source
    pyr_img = mmod.Pyramid(n_levels=3)
    pyr_depth = mmod.Pyramid(n_levels=3)
Example #44
#!/usr/bin/env python

"""
A VoxelGrid can be used to downsample very dense clouds.
This can be especially useful when estimating normals. 
"""

import ecto, ecto_pcl, ecto_pcl_ros
import ecto_ros, ecto_sensor_msgs
import sys

PointCloudSub = ecto_sensor_msgs.Subscriber_PointCloud2
PointCloudPub = ecto_sensor_msgs.Publisher_PointCloud2

plasm = ecto.Plasm()

sub = PointCloudSub("Subscriber", topic_name="/camera/depth_registered/points")
grabber = ecto_pcl_ros.Message2PointCloud("Message2Cloud", format=ecto_pcl.XYZRGB)
voxgrid = ecto_pcl.VoxelGrid("VGrid", leaf_size=0.05)
pcl2msg = ecto_pcl_ros.PointCloud2Message("Cloud2Message")
pub = PointCloudPub("Cloud Publisher", topic_name="/ecto_pcl/sample_output")

plasm.connect(sub["output"] >> grabber[:], grabber[:] >> voxgrid[:], voxgrid[:] >> pcl2msg[:], pcl2msg[:] >> pub[:])

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "voxel_grid")
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
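
The docstring above points out that downsampling is especially helpful before normal estimation. The following is a minimal sketch of that combination; it reuses the NormalEstimation cell exactly as it is configured in other examples of this collection, and the leaf size, search radius and topic name are assumptions carried over from the snippets above:

import sys

import ecto, ecto_pcl, ecto_pcl_ros
import ecto_ros, ecto_sensor_msgs

plasm = ecto.Plasm()

sub = ecto_sensor_msgs.Subscriber_PointCloud2(
    "Subscriber", topic_name="/camera/depth_registered/points")
grabber = ecto_pcl_ros.Message2PointCloud("Message2Cloud", format=ecto_pcl.XYZRGB)
voxgrid = ecto_pcl.VoxelGrid("VGrid", leaf_size=0.05)
# k_search=0 means the radius (in meters) drives the neighborhood search.
normals = ecto_pcl.NormalEstimation("Normals", k_search=0, radius_search=0.02)

plasm.connect(sub["output"] >> grabber[:],
              grabber[:] >> voxgrid[:],
              voxgrid[:] >> normals[:])

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "voxel_grid_normals")
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()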
Example #45
import ecto
import ecto_ros, ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs
from ecto_opencv import highgui
import sys

ImageSub = ecto_sensor_msgs.Subscriber_Image

def do_ecto():
    sub_rgb = ImageSub("image_sub",topic_name='/camera/rgb/image_mono')
    sub_depth = ImageSub("depth_sub",topic_name='/camera/depth/image')

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
                sub_rgb["output"]>>im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show",name="rgb")[:],
                sub_depth["output"]>> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show",name="depth")[:]
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

if __name__ == "__main__":
    ecto_ros.init(sys.argv,"image_node")
    do_ecto()
Example #46
                image_info=CameraInfoSub(topic_name='/camera/rgb/camera_info',queue_size=0),
             )
    
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs
                                 )

    drift_printer = ecto_ros.DriftPrinter()
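    # DriftPrinter (as the name suggests) prints the timestamp drift between the
    # synchronized streams - a handy sanity check on the Synchronizer.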
    
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb", waitKey=5)[:],
                sync[:] >> drift_printer[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1)[:]
            ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)
    
    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

    #sched = ecto.schedulers.Singlethreaded(plasm)
    #sched.execute()
if __name__ == "__main__":
    ecto_ros.init(sys.argv, "ecto_node")
    do_ecto()
Example #47
    def __init__(self):
        ecto_ros.init(sys.argv, "ecto_pcl_demo")

        # Create the argument parser
        parser = argparse.ArgumentParser(
            description='Ecto Euclidean Clustering')

        # Use the parser to create scheduler options
        scheduler_options(parser)
        options = parser.parse_args()

        # Create the overall plasm
        plasm = ecto.Plasm()

        # A VoxelGrid cell
        voxel_grid = ecto_pcl.VoxelGrid("voxel_grid", leaf_size=0.01)

        # A Cropper cell
        cropper = ecto_pcl.Cropper("cropper",
                                   x_min=-0.35,
                                   x_max=0.35,
                                   y_min=-0.35,
                                   y_max=0.35,
                                   z_min=0.3,
                                   z_max=1.5)

        # A EuclideanClusterExtraction cell
        extract_clusters = ecto_pcl.EuclideanClusterExtraction(
            "extract_clusters", min_cluster_size=50, cluster_tolerance=0.02)

        # A cell to filter NaN values from the point cloud
        nan_filter = ecto_pcl.PassThrough("nan_removal")

        # A cell to colorize the clusters
        colorize = ecto_pcl.ColorizeClusters("colorize", max_clusters=100)

        # Create the subscriber cell with the appropriate point cloud topic
        cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
            "cloud_sub", topic_name='camera/depth_registered/points')

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud",
                                                    format=ecto_pcl.XYZRGB)

        # A cell to convert a Ecto/PCL point cloud back to a point cloud message
        cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

        # A cell to convert a point cloud ROS message to an Ecto/PCL point cloud object
        cloud_pub = ecto_sensor_msgs.Publisher_PointCloud2(
            "cloud_pub", topic_name='ecto_pcl/cloud_filtered')

        # Create the plasm by connecting the cells
        plasm.connect(
            cloud_sub[:] >> msg2cloud[:], msg2cloud[:] >> nan_filter[:],
            nan_filter[:] >> voxel_grid[:], voxel_grid[:] >> cropper[:],
            cropper[:] >> extract_clusters[:],
            extract_clusters[:] >> colorize["clusters"],
            cropper[:] >> colorize["input"], colorize[:] >> cloud2msg[:],
            cloud2msg[:] >> cloud_pub[:])

        # Run the plasm
        run_plasm(options, plasm, locals=vars())
Example #48
roslib.load_manifest(PKG)
import ecto
import ecto_ros, ecto_sensor_msgs
import sys

ImageSub = ecto_sensor_msgs.Subscriber_Image
ImagePub = ecto_sensor_msgs.Publisher_Image


def do_ecto():
    sub_rgb = ImageSub("image_sub", topic_name="/camera/rgb/image_mono")
    sub_depth = ImageSub("depth_sub", topic_name="/camera/depth/image")
    pub_rgb = ImagePub("image_pub", topic_name="my_image")
    pub_depth = ImagePub("depth_pub", topic_name="my_depth")

    graph = [sub_rgb["output"] >> pub_rgb["input"], sub_depth["output"] >> pub_depth["input"]]

    plasm = ecto.Plasm()
    plasm.connect(graph)

    ecto.view_plasm(plasm)

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()


if __name__ == "__main__":
    ecto_ros.init(sys.argv, "image_pub")
    do_ecto()
Example #49
import ecto, ecto_pcl, ecto_pcl_ros
import ecto_ros, ecto_sensor_msgs
import sys

PointCloudSub = ecto_sensor_msgs.Subscriber_PointCloud2
PointCloudPub = ecto_sensor_msgs.Publisher_PointCloud2

def do_ecto():
    plasm = ecto.Plasm()

    sub = PointCloudSub("Subscriber", topic_name='/camera/depth_registered/points')
    
    grabber = ecto_pcl_ros.Message2PointCloud("Message2Cloud", format=ecto_pcl.XYZRGB)

    passthru = ecto_pcl.PassThrough("Pass", filter_field_name="z", filter_limit_max=2.0)

    pcl2msg = ecto_pcl_ros.PointCloud2Message("Cloud2Message", )

    pub = PointCloudPub("Cloud Publisher", topic_name='/ecto_pcl/sample_output')    

    plasm.connect(sub['output'] >> grabber[:],
                  grabber[:] >> passthru[:],
                  passthru[:] >> pcl2msg[:],
                  pcl2msg[:] >> pub[:])

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "passthrough")
    do_ecto()

Example #50
        '-a',
        '--angle_thresh',
        metavar='RADIANS',
        dest='angle_thresh',
        type=float,
        default=math.pi / 36,
        help='''The delta angular threshold in pose. Default is pi/36 radians (5 degrees).
                            A frame is only recorded if its pose differs from every previously recorded pose by at least this amount.
                            ''')

    from ecto.opts import scheduler_options
    #add ecto scheduler args.
    group = parser.add_argument_group('ecto scheduler options')
    scheduler_options(group, default_scheduler='Threadpool')
    args = parser.parse_args()
    if len(args.bag) < 1:
        parser.print_help()
        sys.exit(1)
    return args


if "__main__" == __name__:
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(sys.argv)
    options = parse_args()
    ecto_ros.init(argv, "openni_capture", False)
    plasm = openni_capture.create_capture_plasm(options.bag,
                                                options.angle_thresh)
    from ecto.opts import run_plasm
    run_plasm(options, plasm)
Example #51
    )
    #The synchronizer expects this dict, and will have an output for each key:value pair
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    #some rosmsg ~> opencv type converters
    rgb = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()

    #declare a graph that just shows the images using opencv highgui
    graph = [
        sync["image"] >> rgb["image"], sync["depth"] >> depth["image"],
        rgb["image"] >> imshow(name="rgb")["image"],
        depth["image"] >> imshow(name="depth")["image"]
    ]

    #add the graph to our ecto plasm
    plasm = ecto.Plasm()
    plasm.connect(graph)

    #We'll use a threadpool based scheduler to execute this one.
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()  #execute forever


if __name__ == "__main__":
    import sys
    ecto_ros.init(
        sys.argv,
        "ecto_node")  #strips out ros remappings and initializes roscpp
    do_ecto()
Example #52
#!/usr/bin/env python

import ecto, ecto_pcl, ecto_ros, ecto_pcl_ros
import ecto_ros.ecto_sensor_msgs as ecto_sensor_msgs
from ecto.opts import scheduler_options, run_plasm
import sys
import time
import os
import argparse

import cloud_treatment_ecto.cloud_treatment as cloud_treatment

ecto_ros.init(sys.argv, "locate_ladder")

parser = argparse.ArgumentParser(description='Treatment that segments the '
                                 'ladder steps from the rest of the scene')
scheduler_options(parser)

parser.add_argument('-p',
                    '--pcdfile',
                    default='ladder_test_5.pcd',
                    help='The pcdfile to input')

options = parser.parse_args()
pcdfilename = options.pcdfile
plasm = ecto.Plasm()

reader = cloud_treatment.PCDReaderCell("Reader_ecto", filename=pcdfilename)

cloud_sub = ecto_sensor_msgs.Subscriber_PointCloud2(
    "cloud_sub", topic_name='/worldmodel_main/pointcloud_vis')
Example #53
import ecto, ecto_pcl, ecto_pcl_ros
import ecto_ros, ecto_sensor_msgs
import sys

PointCloudSub = ecto_sensor_msgs.Subscriber_PointCloud2
PointCloudPub = ecto_sensor_msgs.Publisher_PointCloud2

def do_ecto():
    plasm = ecto.Plasm()

    sub = PointCloudSub("Subscriber", topic_name='/camera/depth_registered/points')
    
    grabber = ecto_pcl_ros.Message2PointCloud("Message2Cloud", format=ecto_pcl.XYZRGB)

    p1 = ecto_pcl.PassThrough("Pass", filter_field_name="z", filter_limit_max=2.0)
    p2 = ecto_pcl.PassThrough("Pass2", filter_field_name="z", filter_limit_min=3.0, filter_limit_max=8.0)
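    # Two depth bands: p1 keeps near points (z <= 2 m) and is voxel-downsampled
    # below, while p2 keeps far points (3 m to 8 m) and is merged back in at
    # full resolution.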

    voxgrid = ecto_pcl.VoxelGrid("VGrid", leaf_size=0.1)
    merge = ecto_pcl.MergeClouds("Merge")

    pcl2msg = ecto_pcl_ros.PointCloud2Message("Cloud2Message")

    pub = PointCloudPub("Cloud Publisher", topic_name='/ecto_pcl/sample_output')    

    plasm.connect(sub['output'] >> grabber[:],
                  grabber[:] >> p1[:],
                  grabber[:] >> p2[:],
                  p1[:] >> voxgrid[:],
                  voxgrid[:] >> merge["input"],
                  p2[:] >> merge["input2"],
                  merge[:] >> pcl2msg[:],
                  pcl2msg[:] >> pub[:])

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute()

if __name__ == "__main__":
    ecto_ros.init(sys.argv, "merge_clouds")
    do_ecto()

Example #54
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=1,
                      rate=0.5)  #rate hack for fidelity in message count FIXME
    wait_bag(rosbag)
    time.sleep(0.1)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
    print "expecting Depth count:", msg_counts['/camera/depth/image']
    print "Depth count:", counter_depth.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_rgb.outputs.count
    assert msg_counts['/camera/depth/image'] >= counter_depth.outputs.count
    assert counter_rgb.outputs.count != 0
    assert counter_depth.outputs.count != 0


if __name__ == "__main__":
    bagname = sys.argv[1]
    msg_counts = bag_counts(bagname)
    try:
        roscore = start_roscore(delay=1)
        ecto_ros.init(sys.argv, "image_sub_node")
        do_ecto(bagname, msg_counts, ecto.schedulers.Singlethreaded)
    finally:
        roscore.terminate()
Example #55
        return srv.ComputeECGraphResponse(self.outputgraph, self.found_objects)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('yaml_file', type=str)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--showgraph', action='store_true', default=False)
    parser.add_argument('--service', action='store_true', default=False)
    myargv = rospy.myargv(argv=sys.argv)[1:]
    args = parser.parse_args(myargv)

    rospy.init_node('plasm_yaml_ros_service')

    ecto_ros.init(myargv, 'plasm_yaml_ros_service', anonymous=False)

    rospack = rospkg.RosPack()
    if any([args.yaml_file.startswith(x) for x in [".", "/"]]):
        yml_file = [args.yaml_file]
    else:
        yml_file = map(
            lambda x: os.path.join(rospack.get_path("ecto_rbo_yaml"), "data/",
                                   x), [args.yaml_file])

    ecto_plasm, ecto_cells = plasm_yaml_factory.load(yml_file,
                                                     debug=args.debug)

    rospy.loginfo("Plasm initialized: %s" % (yml_file))

    if args.showgraph:
Example #56
import sys
import math

visualize = True
debug_graphs = True
dir = "/wg/stor2a/mkrainin/object_data/perception challenge/object_meshes"
# noise added to the test pose: 10 degrees of rotation, 3 cm of translation
rotation = 10*math.pi/180
translation = 0.03
# optimization params
use_icp = False  # if False, the sensor model is used instead of ICP
restarts = 10

if __name__ == "__main__":
    node_name = "corrector_test"
    ecto_ros.init(sys.argv, node_name,False)
    
    """
    
    Pose refinement subplasm
    
    """
    sub_plasm = ecto.Plasm()
    
    #subscribers/converters
    sub_cloud = ecto_sensor_msgs.Subscriber_PointCloud2("Cloud2 Subscriber",
                                        topic_name="/camera/rgb/points")
    sub_info = ecto_sensor_msgs.Subscriber_CameraInfo("Cam Info Subscriber",
                                        topic_name="/camera/rgb/camera_info")
    msg2cloud = ecto_pcl_ros.Message2PointCloud("Cloud2 To Type-Erased",
                                        format=ecto_pcl.XYZRGB)
Example #57
    def handle_clean(self, req):
        print "cleannig point cloude"
        self.pclFunsion.params.clean = True
        self.sched = ecto.Scheduler(self.cleanPlasm)
        lock.acquire()
        self.keepRun = self.sched.execute(niter=1)
        lock.release()
        self.pclFunsion.params.clean = False
        return CleanPCLResponse(0)


def handle_sigint(signum, frame):
    print('Stopping perception bridge')
    sys.exit(0)


if __name__ == "__main__":
    rospy.init_node("perception_bridge")
    ecto_ros.init(sys.argv, "ros_perception")
    p = PerceptionSever()
    p.start()
    signal.signal(signal.SIGINT, handle_sigint)
    while not rospy.is_shutdown():
        signal.signal(signal.SIGINT, handle_sigint)
        lock.acquire()
        if not p.keepRun:
            print('Stopping perception bridge')
            sys.exit(0)
        lock.release()
        time.sleep(0.5)