Example #1
    def configure(self, _p, _i, _o):
        #ROS message converters
        self._depth_converter = ecto_ros.Image2Mat()
        self._rgb_image = ecto_ros.Image2Mat(swap_rgb=True)

        #these transform the depth into something usable
        self._points3d = DepthTo3d()
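
These examples are excerpts and omit their imports. A minimal preamble that would make the names used below resolvable might look like the following sketch; the module layout is an assumption pieced together from the aliases that appear later on this page (see Examples #6 and #11):

import sys
import ecto
import ecto_ros
import ecto_sensor_msgs  # also reachable as ecto_ros.ecto_sensor_msgs
from ecto_opencv import highgui, imgproc, calib
from ecto_opencv.calib import DepthTo3d

# Aliases used throughout these snippets (hypothetical, but consistent
# with Examples #6 and #11):
ImageSub = ecto_sensor_msgs.Subscriber_Image
ImageBagger = ecto_sensor_msgs.Bagger_Image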
Example #2
def do_ecto():
    baggers = dict(image=ImageBagger(topic_name='/camera/rgb/image_color'),
                   depth=ImageBagger(topic_name='/camera/depth/image'),
                   )

    bagreader = ecto_ros.BagReader('Bag Ripper',
                                    baggers=baggers,
                                    bag=sys.argv[1],
                                  )
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
                bagreader["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
                bagreader["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
            ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.Scheduler(plasm)
    sched.execute()
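
A plausible way to drive this function; the node name is a hypothetical choice, and the ecto_ros.init call mirrors what Examples #8 and #9 do before execution:

if __name__ == '__main__':
    # initialise the ROS node before the graph starts pulling messages
    ecto_ros.init(sys.argv, 'bag_ripper')
    do_ecto()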
Example #3
def do_ecto(bagname, msg_counts, Scheduler):
    sub_rgb = ImageSub("image_sub",
                       topic_name='/camera/rgb/image_color',
                       queue_size=0)
    sub_depth = ImageSub("depth_sub",
                         topic_name='/camera/depth/image',
                         queue_size=0)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    counter_rgb = ecto.Counter(every=10)
    counter_depth = ecto.Counter()
    graph = [
        sub_rgb["output"] >> im2mat_rgb["image"],
        sub_depth["output"] >> im2mat_depth["image"],
        im2mat_rgb[:] >> counter_rgb[:],
        im2mat_depth[:] >> counter_depth[:]
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=1,
                      rate=0.5)  #rate hack for fidelity in message count FIXME
    wait_bag(rosbag)
    time.sleep(0.1)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
    print "expecting Depth count:", msg_counts['/camera/depth/image']
    print "Depth count:", counter_depth.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_rgb.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_depth.outputs.count
    assert counter_rgb.outputs.count != 0
    assert counter_depth.outputs.count != 0
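
play_bag and wait_bag are test helpers that this excerpt does not define. A rough stand-in, assuming the rosbag command-line tool is on PATH, could be:

import subprocess
import time

def play_bag(bagname, delay=0.0, rate=1.0):
    # give the subscribers a moment to connect, then start playback
    time.sleep(delay)
    return subprocess.Popen(['rosbag', 'play', '--rate', str(rate), bagname])

def wait_bag(proc):
    # block until rosbag play finishes
    proc.wait()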
Example #4
def do_ecto():
    #Set up the ecto_ros subscribers, with a dict of output name -> subscriber cell
    subs = dict(
        image=ImageSub(topic_name='image', queue_size=0),
        depth=ImageSub(topic_name='depth', queue_size=0),
    )
    #The synchronizer expects this dict, and will have an output for each key:value pair
    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    #some rosmsg ~> opencv type converters
    rgb = ecto_ros.Image2Mat()
    depth = ecto_ros.Image2Mat()

    #declare a graph that just shows the images using opencv highgui
    graph = [
        sync["image"] >> rgb["image"], sync["depth"] >> depth["image"],
        rgb["image"] >> imshow(name="rgb")["image"],
        depth["image"] >> imshow(name="depth")["image"]
    ]

    #add the graph to our ecto plasm
    plasm = ecto.Plasm()
    plasm.connect(graph)

    #We'll use a single-threaded scheduler to execute this one.
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()  #execute forever
Example #5
def do_ecto():
    baggers = dict(
        image=ImageBagger(topic_name='/camera/rgb/image_mono'),
        depth=ImageBagger(topic_name='/camera/depth/image_raw'),
    )

    bagwriter = ecto_ros.BagWriter(
        'Bag Writer',
        baggers=baggers,
        bag=sys.argv[1],
    )
    subs = dict(
        image=ImageSub(topic_name='/camera/rgb/image_mono', queue_size=0),
        depth=ImageSub(topic_name='/camera/depth/image_raw', queue_size=0),
    )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
        sync["image"] >> im2mat_rgb["image"],
        im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        sync[:] >> bagwriter[:],
        sync["depth"] >> im2mat_depth["image"],
        im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.schedulers.Threadpool(plasm)
    sched.execute(niter=30)  #capture roughly one second at 30 fps
Example #6
    def connect_background_source(self):
        json_db_str = '{"type": "CouchDB", "root": "http://localhost:5984", "collection": "object_recognition"}'
        self.json_db = ecto.Constant(value=json_db_str)

        object_id_str = '4680aac58c1d263b9449d57bd2000f27'
        self.object_id = ecto.Constant(value=object_id_str)

        self.image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
        self.image = ecto_ros.Image2Mat()
        self.depth = ecto_ros.Image2Mat()

        bag = "/home/sam/rosbags/sigverse/no_objects.bag"

        ImageBagger = ecto_sensor_msgs.Bagger_Image
        CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo

        baggers = dict(
            image=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
            image_ci=CameraInfoBagger(
                topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
            depth=ImageBagger(
                topic_name='/hsrb/head_rgbd_sensor/depth/image_raw'),
        )

        # this will read all images in the path
        path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
        self.file_source = ImageReader(path=os.path.expanduser(path))

        self.bag_reader = ecto_ros.BagReader('Bag Reader',
                                             baggers=baggers,
                                             bag=bag,
                                             random_access=True,
                                             )

        self.rgb = imgproc.cvtColor(
            'bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
        self.display = highgui.imshow(name='Training Data', waitKey=10000)
        self.image_mux = ecto_yolo.ImageMux()

        self.graph += [
            self.bag_reader['image'] >> self.image['image'],
            self.bag_reader['depth'] >> self.depth['image'],
            self.image[:] >> self.rgb[:],
            self.bag_reader['image_ci'] >> self.image_ci['camera_info'],
            self.image_ci['K'] >> self.image_mux['K1'],
            self.rgb['image'] >> self.image_mux['image1'],
            self.depth['image'] >> self.image_mux['depth1'],
            self.file_source['image'] >> self.image_mux['image2'],
        ]
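
connect_background_source only accumulates connections in self.graph; a hypothetical companion method (not part of the original class) that mirrors the other examples would turn that list into a plasm and run it:

    def run(self):
        # assumes self.graph was initialised to [] before the
        # connect_*_source methods appended their connections
        plasm = ecto.Plasm()
        plasm.connect(self.graph)
        sched = ecto.schedulers.Singlethreaded(plasm)
        sched.execute()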
Example #7
def do_ecto():
    sub_rgb = ImageSub("image_sub", topic_name='/camera/rgb/image_mono')
    sub_depth = ImageSub("depth_sub", topic_name='/camera/depth/image')

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    graph = [
        sub_rgb["output"] >> im2mat_rgb["image"],
        im2mat_rgb["image"] >> highgui.imshow("rgb show", name="rgb")[:],
        sub_depth["output"] >> im2mat_depth["image"],
        im2mat_depth["image"] >> highgui.imshow("depth show", name="depth")[:]
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    ecto.view_plasm(plasm)

    sched = ecto.Scheduler(plasm)
    sched.execute()
Example #8
def do_ecto(bagname, msg_counts, Scheduler):
    ecto_ros.init(sys.argv, "image_sub_node")
    sub_rgb = ImageSub("image_sub",
                       topic_name='/camera/rgb/image_color',
                       queue_size=0)
    im2mat_rgb = ecto_ros.Image2Mat()
    counter_rgb = ecto.Counter()
    graph = [
        sub_rgb["output"] >> im2mat_rgb["image"],
        im2mat_rgb[:] >> counter_rgb[:],
    ]
    plasm = ecto.Plasm()
    plasm.connect(graph)
    sched = Scheduler(plasm)
    sched.execute_async()
    rosbag = play_bag(bagname, delay=0.5)
    wait_bag(rosbag)
    sched.stop()

    print "expecting RGB count:", msg_counts['/camera/rgb/image_color']
    print "RGB count:", counter_rgb.outputs.count
    assert msg_counts['/camera/rgb/image_color'] >= counter_rgb.outputs.count
    assert counter_rgb.outputs.count != 0
Example #9
def create_capture_plasm(bag_name, angle_thresh, segmentation_cell, n_desired,
                                            sensor, res=SXGA_RES, fps=FPS_30,
                                            orb_matches=False,
                                            preview=False, use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []
    
#   Create ROS node, subscribing to OpenNI topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2("cloud_sub", topic_name='/camera/depth_registered/points') 
    # openni&freenect requires '/camera/rgb/image_rect_color', openni2 just need '/camera/rgb/image_raw'
    if sensor in ['kinect']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_color')
    elif sensor in ['xtion']: 
        rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image("depth_image_sub", topic_name='/camera/depth_registered/image_raw')  
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo("depth_camera_info_sub", topic_name='/camera/depth_registered/camera_info')
        
    
#   Convert ROS topics to cv types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()


    graph += [ cloud_sub['output'] >> msg2cloud[:],
               rgb_image_sub[:] >> image2cv[:],
               depth_image_sub[:] >> depth2cv[:],
               rgb_camera_info_sub[:] >> rgb_info2cv[:],
               depth_camera_info_sub[:] >> depth_info2cv[:]
                ]
    """            
    rgb_display = highgui.imshow(name='rgb')
    depth_display = highgui.imshow(name='depth')
    graph += [image2cv[:] >> rgb_display[:],
              depth2cv[:] >> depth_display[:],
            ]
            
    #   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ msg2cloud[:] >> viewer[:] ]
    """   
#   Process pointcloud

    # Cut off some parts
    
    cut_x = PassThrough(filter_field_name="x", 
                        filter_limit_min=-0.2,
                        filter_limit_max=0.2)
                        
    cut_y = PassThrough(filter_field_name="y", 
                        filter_limit_min=-0.5,
                        filter_limit_max=0.5)
    cut_z = PassThrough(filter_field_name="z",    
                        filter_limit_min=-0.0,
                        filter_limit_max=1.0)
                        
    graph += [ msg2cloud[:] >> cut_x[:],
               cut_x[:] >> cut_y[:],
               cut_y[:] >> cut_z[:] ]
               
    # Voxel filter
    
    voxel_grid = VoxelGrid("voxel_grid", leaf_size=0.002)
    
    graph += [ cut_z[:] >> voxel_grid[:] ]
               
    # Find plane and delete it
    
    normals = NormalEstimation("normals", k_search=0, radius_search=0.02)
    
    graph += [ voxel_grid[:] >> normals[:] ]
    
    plane_finder = SACSegmentationFromNormals("planar_segmentation",
                                                 model_type=SACMODEL_NORMAL_PLANE,
                                                 eps_angle=0.09, distance_threshold=0.1)
    
    plane_deleter = ExtractIndices(negative=True)
    
    graph += [ voxel_grid[:] >> plane_finder['input'],
               normals[:] >> plane_finder['normals'] ]
    graph += [ voxel_grid[:]  >> plane_deleter['input'],
               plane_finder['inliers'] >> plane_deleter['indices'] ]
#   Create pointcloud viewer
    viewer = CloudViewer("viewer", window_name="Clouds!")    
    graph += [ plane_deleter[:] >> viewer[:] ]
    
#   Compute vfh
    
    vfh_signature = VFHEstimation(radius_search=0.01)
    
    graph += [ plane_deleter[:] >>  vfh_signature['input'],
               normals[:] >> vfh_signature['normals'] ]                                                          
    
    # convert the image to grayscale                                                                      
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    graph += [image2cv[:]  >> rgb2gray[:] ]
                
    # get a pose using the dot pattern: there may be a scale ambiguity, as this is 3d only
    poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                     pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                     square_size=0.04, debug=True)
    graph += [ image2cv[:] >> poser['color_image'],
               rgb_info2cv['K'] >> poser['K_image'],
               rgb2gray[:] >> poser['image'] ]         #poser gives us R&T from camera to dot pattern
    
    
    points3d = calib.DepthTo3d()
    
    graph += [ depth_info2cv['K'] >> points3d['K'],
               depth2cv['image'] >> points3d['depth']
             ]
    
    plane_est = PlaneFinder(min_size=10000)
    compute_normals = ComputeNormals()
    # Convert K if the resolution is different (the camera should output that)
    
    graph += [ # find the normals
                depth_info2cv['K'] >> compute_normals['K'],
                points3d['points3d'] >> compute_normals['points3d'],
                # find the planes
                compute_normals['normals'] >> plane_est['normals'],
                depth_info2cv['K'] >> plane_est['K'],
                points3d['points3d'] >> plane_est['points3d'] ]
    

    
    pose_filter = object_recognition_capture.ecto_cells.capture.PlaneFilter()

    # make sure the pose is centered at the origin of the plane
    graph += [ depth_info2cv['K'] >> pose_filter['K_depth'],
               poser['R', 'T'] >> pose_filter['R', 'T'],
               plane_est['planes', 'masks'] >> pose_filter['planes', 'masks'] ]
               
    delta_pose = ecto.If('delta R|T', cell=object_recognition_capture.DeltaRT(angle_thresh=angle_thresh,
                                                          n_desired=n_desired))
                                                          
    poseMsg = RT2PoseStamped(frame_id='/camera_rgb_optical_frame')

    graph += [ pose_filter['R', 'T', 'found'] >> delta_pose['R', 'T', 'found'],
               pose_filter['R', 'T'] >> poseMsg['R', 'T'] ]
    
    trainer_ = Trainer()
    
    graph += [ pose_filter['R', 'T'] >> trainer_['R', 'T'],
               vfh_signature['output'] >> trainer_['features'],
               delta_pose['last'] >> trainer_['last'],
               delta_pose['novel'] >> trainer_['novel'],
               plane_deleter[:] >> trainer_['input'] ]
 
    novel = delta_pose['novel']

    cloud2msg = ecto_pcl_ros.PointCloud2Message("cloud2msg")

    graph += [ plane_deleter[:] >> cloud2msg[:] ]   
    
    baggers = dict(points=PointCloudBagger(topic_name='/camera/depth_registered/points'),
                   pose=PoseBagger(topic_name='/camera/pose')
                   )
                   
    bagwriter = ecto.If('Bag Writer if R|T', cell=ecto_ros.BagWriter(baggers=baggers, bag=bag_name))
    pcd_writer = ecto.If('msd pcd writer', cell=PCDWriter())
    
    graph += [    cloud2msg[:] >> bagwriter['points'],
                  poseMsg['pose'] >> bagwriter['pose']
                  ]
    graph += [ plane_deleter['output'] >> pcd_writer['input'] ]
    
    delta_pose.inputs.__test__ = True
    pcd_writer.inputs.__test__ = True
    
    graph += [novel >> (bagwriter['__test__'])]  
    graph += [novel >> (pcd_writer['__test__'])]
    
    rt2pose = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    rt2pose_filtered = RT2PoseStamped(frame_id="camera_rgb_optical_frame")
    posePub = PosePublisher("pose_pub", topic_name='pattern_pose')
    posePub_filtered = PosePublisher("pose_pub_filtered", topic_name='pattern_pose_filtered')
    graph += [ poser['R', 'T'] >> rt2pose['R', 'T'],
               rt2pose['pose'] >> posePub['input'],
               pose_filter['R', 'T'] >> rt2pose_filtered['R', 'T'],
               rt2pose_filtered['pose'] >> posePub_filtered['input'] ]
   
    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell) # return segmentation for tuning of parameters.
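
A hypothetical driver for create_capture_plasm; the argument values are placeholders, and the scheduler choice simply follows the other examples on this page:

plasm, segmentation = create_capture_plasm('capture.bag',
                                           angle_thresh=0.17,
                                           segmentation_cell=None,
                                           n_desired=36,
                                           sensor='kinect')
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute()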
Example #10
    display = highgui.imshow('Poses', name='Poses', waitKey=2, autoSize=True)
    mask_display = highgui.imshow('Masks',
                                  name='Masks',
                                  waitKey=-1,
                                  autoSize=True)
    object_display = highgui.imshow('Object',
                                    name='Object',
                                    waitKey=-1,
                                    autoSize=True)

    imsaver = highgui.ImageSaver('pose image saver', filename='image_pose_')
    rawsaver = highgui.ImageSaver('raw image saver', filename='image_raw_')
    masker = calib.PlanarSegmentation(z_min=0.015, y_crop=0.15, x_crop=0.15)
    bitwise_and = imgproc.BitwiseAnd()

    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()

    plasm.connect(
        sync["image"] >> im2mat_rgb["image"],
        im2mat_rgb["image"] >> (brg2rgb[:], ),
        sync["depth"] >> im2mat_depth['image'],
        brg2rgb[:] >> (rgb2gray[:], poser['color_image'], rawsaver['image'],
                       bitwise_and['b']),
        rgb2gray[:] >> poser['image'],
        poser['debug_image'] >> (display['input'], imsaver['image']),
        display['out'] >> (imsaver['trigger'], rawsaver['trigger']),
        camera_info['K'] >> (poser['K'], masker['K']),
        poser['R', 'T'] >> masker['R', 'T'],
        im2mat_depth['image'] >> masker['depth'],
        masker['mask'] >> (mask_display[:], gray2rgb[:]),
        gray2rgb[:] >> bitwise_and['a'],
        bitwise_and[:] >> object_display[:])
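
The excerpt above assumes several cells (sync, camera_info, poser, the color converters) created earlier in the script. Hypothetical definitions for the missing converters, with names kept exactly as they appear in the connections (the GRAY2RGB flag is an assumption):

    brg2rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    gray2rgb = imgproc.cvtColor('gray -> rgb', flag=imgproc.Conversion.GRAY2RGB)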
bag = "/home/sam/rosbags/sigverse/stroll.bag"
ImageBagger = ecto_sensor_msgs.Bagger_Image
CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo
baggers = dict(
    image=ImageBagger(topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
    image_ci=CameraInfoBagger(topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
)

source = ecto_ros.BagReader(
    'Bag Reader',
    baggers=baggers,
    bag=bag,
    random_access=False,
)

image = ecto_ros.Image2Mat()
rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)
display = imshow(name='RGB', triggers=dict(save=ord('s')))

detector = ecto_yolo.Detector(
    data_config="/home/sam/OrkData/cfg/ork.dataset",
    network_config="/home/sam/OrkData/cfg/yolov3-ork.cfg",
    weights="/home/sam/OrkData/weights/yolov3-ork_34000.weights",
    detection_threshold=0.2,
)

# ecto options
scheduler_options(parser)
args = parser.parse_args()

plasm = ecto.Plasm()
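
The excerpt stops right after building the empty plasm; parser is assumed to be an argparse.ArgumentParser, and scheduler_options comes from ecto.opts. A hypothetical continuation using ecto.opts.run_plasm (the natural counterpart of scheduler_options); wiring for the detector is omitted because its port names are not shown in this excerpt:

from ecto.opts import run_plasm

plasm.connect(
    source['image'] >> image['image'],
    image['image'] >> rgb['image'],
    rgb['image'] >> display['image'],
)
run_plasm(args, plasm, locals=vars())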
Example #12
def create_capture_plasm(bag_name,
                         angle_thresh,
                         segmentation_cell,
                         n_desired,
                         orb_template='',
                         res=SXGA_RES,
                         fps=FPS_30,
                         orb_matches=False,
                         preview=False,
                         use_turn_table=True):
    '''
    Creates a plasm that will capture openni data into a bag, using a dot pattern to sparsify views.

    @param bag_name: A filename for the bag, will write to this file.
    @param angle_thresh: The angle threshold in radians to sparsify the views with.
    '''
    graph = []

    #   Create ros node, subscribing to openni topics
    argv = sys.argv[:]
    ecto_ros.strip_ros_args(argv)
    ecto_ros.init(argv, "object_capture_msd_pcl", False)
    cloud_sub = ecto_ros.ecto_sensor_msgs.Subscriber_PointCloud2(
        "cloud_sub", topic_name='/camera/depth_registered/points')
    rgb_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image(
        "rgb_image_sub", topic_name='/camera/rgb/image_raw')
    depth_image_sub = ecto_ros.ecto_sensor_msgs.Subscriber_Image(
        "depth_image_sub", topic_name='/camera/depth_registered/image_raw')
    rgb_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo(
        "rgb_camera_info_sub", topic_name='/camera/rgb/camera_info')
    depth_camera_info_sub = ecto_ros.ecto_sensor_msgs.Subscriber_CameraInfo(
        "depth_camera_info_sub",
        topic_name='/camera/depth_registered/camera_info')

    #   Convert ROS topics to cv types
    msg2cloud = ecto_pcl_ros.Message2PointCloud("msg2cloud", format=XYZRGB)
    image2cv = ecto_ros.Image2Mat()
    depth2cv = ecto_ros.Image2Mat()
    rgb_info2cv = ecto_ros.CameraInfo2Cv()
    depth_info2cv = ecto_ros.CameraInfo2Cv()

    graph += [
        cloud_sub['output'] >> msg2cloud[:],
        rgb_image_sub[:] >> image2cv[:],
        depth_image_sub[:] >> depth2cv[:],
        rgb_camera_info_sub[:] >> rgb_info2cv[:],
        depth_camera_info_sub[:] >> depth_info2cv[:]
    ]

    #display the mask

    display = highgui.imshow(name='Poses')
    graph += [
        image2cv[:] >> display[:],
    ]

    # convert the image to grayscale
    #    rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.Conversion.RGB2GRAY)
    #    graph += [image2cv[:]  >> rgb2gray[:] ]

    # get a pose using the dot pattern: there may be a scale ambiguity, as this is 3d only
    poser = OpposingDotPoseEstimator(
        rows=5,
        cols=3,
        pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
        square_size=0.04,
        debug=True)
    graph += [
        image2cv[:] >> poser['color_image'],
        rgb_info2cv['K'] >> poser['K_image'], image2cv[:] >> poser['image']
    ]

    plasm = ecto.Plasm()
    plasm.connect(graph)
    return (plasm, segmentation_cell)  # return segmentation for tuning of parameters.
Example #13
observation_renderer = ecto_yolo.ObservationRenderer()

json_db_str = '{"type": "CouchDB", "root": "http://localhost:5984", "collection": "object_recognition"}'
json_db = ecto.Constant(value=json_db_str)

object_id_str = '4680aac58c1d263b9449d57bd2000f27'
object_id = ecto.Constant(value=object_id_str)

frame_id_str = 'camera_optical_frame'
frame_id = ecto.Constant(value=frame_id_str)

ImageBagger = ecto_sensor_msgs.Bagger_Image
CameraInfoBagger = ecto_sensor_msgs.Bagger_CameraInfo

image_ci = ecto_ros.CameraInfo2Cv('camera_info -> cv::Mat')
image = ecto_ros.Image2Mat()
depth = ecto_ros.Image2Mat()

bag = "/home/sam/rosbags/sigverse/no_objects.bag"

baggers = dict(
    image=ImageBagger(topic_name='/hsrb/head_rgbd_sensor/rgb/image_raw'),
    image_ci=CameraInfoBagger(
        topic_name='/hsrb/head_rgbd_sensor/rgb/camera_info'),
    depth=ImageBagger(topic_name='/hsrb/head_rgbd_sensor/depth/image_raw'),
)

# this will read all images in the path
path = '/home/sam/Code/vision/VOCdevkit/VOC2012/JPEGImages'
file_source = ImageReader(path=os.path.expanduser(path))
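
This excerpt declares cells but stops before any wiring. Based on the nearly identical setup in Example #6, a plausible graph for these cells would be:

bag_reader = ecto_ros.BagReader('Bag Reader',
                                baggers=baggers,
                                bag=bag,
                                random_access=True)
rgb = imgproc.cvtColor('bgr -> rgb', flag=imgproc.Conversion.BGR2RGB)

graph = [
    bag_reader['image'] >> image['image'],
    bag_reader['depth'] >> depth['image'],
    image[:] >> rgb[:],
    bag_reader['image_ci'] >> image_ci['camera_info'],
]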
Example #14
    plasm = ecto.Plasm()
    sched = ecto.schedulers.Singlethreaded(plasm)

    #a little debug on/off switch
    debug = True
    if 'R' in sys.argv:
        debug = False

    #add our black box to the plasm.
    pose_from_fiducial = PoseFromFiducial(plasm,
                                          rows=7,
                                          cols=3,
                                          pattern_type="acircles",
                                          square_size=0.03,
                                          debug=debug)

    sub_rgb = ecto_sensor_msgs.Subscriber_Image('Image sub',
                                                topic_name='image')
    im2mat_rgb = ecto_ros.Image2Mat('Image -> cv::Mat')
    pose_gen = ecto_ros.RT2PoseStamped('R,T -> PoseStamped',
                                       frame_id='/openni_rgb_optical_frame')
    pose_pub = ecto_geometry_msgs.Publisher_PoseStamped('Pose Pub',
                                                        topic_name='dot_pose')
    plasm.connect(sub_rgb["output"] >> im2mat_rgb["image"],
                  im2mat_rgb["image"] >> pose_from_fiducial['image'],
                  pose_from_fiducial["R", "T"] >> pose_gen["R", "T"],
                  pose_gen['pose'] >> pose_pub[:])

    ecto.view_plasm(plasm)
    sched.execute()
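
The subscribers and publishers above need a live ROS node; a hypothetical entry point, assuming the snippet body lives in a main() function:

if __name__ == '__main__':
    ecto_ros.init(sys.argv, 'pose_from_fiducial_node')
    main()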