Example #1
# Reconstructed setup: the head of this example is missing from the listing.
# Cell names are inferred from the connections below; imports mirror Example #2,
# and the StereoModelLoader/OpenNICapture parameters are best-guess placeholders.
import ecto
from ecto_opencv.calib import DepthTo3d
from image_pipeline import (Rectifier, StereoModelLoader, DepthRegister,
                            CameraModelToCv, CV_INTER_NN)
from ecto_openni import OpenNICapture, DEPTH_RGB
from ecto_object_recognition.conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB

openni_reg = False  # False: register depth to RGB in software
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=openni_reg)
stereo_model = StereoModelLoader()  # calibration-file parameters omitted here
plasm = ecto.Plasm()

if not openni_reg:
    # software registration path: rectify both streams, then register depth
    rect_rgb = Rectifier()
    rect_depth = Rectifier(interpolation_mode=CV_INTER_NN)
    reg_depth = DepthRegister()

    plasm.connect(
        capture['image'] >> rect_rgb['image'],
        stereo_model['left_model'] >> rect_rgb['camera'],
        capture['depth'] >> rect_depth['image'],
        stereo_model['right_model'] >> rect_depth['camera'],
        # hook up the registration
        stereo_model['stereo_model'] >> reg_depth['rgbd_model'],
        rect_depth['image'] >> reg_depth['image'])

camera_converter = CameraModelToCv()
depthTo3d = DepthTo3d()
to_xyzrgb = MatToPointCloudXYZRGB()
pcl_cloud = PointCloudT2PointCloud(format=XYZRGB)
cloud_viewer = CloudViewer()

if not openni_reg:
    plasm.connect(stereo_model['left_model'] >> camera_converter['camera'],
                  camera_converter['Kp'] >> depthTo3d['K'],
                  reg_depth['image'] >> depthTo3d['depth'],
                  depthTo3d['points3d'] >> to_xyzrgb['points'],
                  rect_rgb['image'] >> to_xyzrgb['image'],
                  to_xyzrgb[:] >> pcl_cloud[:],
                  pcl_cloud[:] >> cloud_viewer[:])
else:
    plasm.connect(stereo_model['left_model'] >> camera_converter['camera'],
                  camera_converter['K'] >> depthTo3d['K'],
                  capture['depth'] >> depthTo3d['depth'],
                  depthTo3d['points3d'] >> to_xyzrgb['points'],
                  # the remaining connections were cut off in the listing;
                  # completed by analogy with the branch above
                  capture['image'] >> to_xyzrgb['image'],
                  to_xyzrgb[:] >> pcl_cloud[:],
                  pcl_cloud[:] >> cloud_viewer[:])
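As reproduced here, Example #1 builds the registration graph but never executes it (the tail of the listing is cut off as well). One plausible way to run the plasm, borrowing the scheduler_options/run_plasm pattern that Example #3 below uses, is sketched here:

# Sketch only: execute the plasm built above with the standard ecto.opts
# helpers (same pattern as Example #3).
import argparse
from ecto.opts import scheduler_options, run_plasm

parser = argparse.ArgumentParser(
    description='Register Kinect depth to RGB and view the cloud.')
scheduler_options(parser)
args = parser.parse_args()
run_plasm(args, plasm, locals=vars())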
Example #2

import ecto
from ecto_opencv.calib import DepthTo3d
from ecto_opencv.highgui import imshow
from image_pipeline import Rectifier, StereoModelLoader, DepthRegister, CameraModelToCv, CV_INTER_NN
from ecto_openni import OpenNICapture, DEPTH_RGB, DEPTH_IR, RGB, IR, IRGamma, enumerate_devices
from ecto_object_recognition.conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB

openni_reg = True
print(enumerate_devices())

capture = OpenNICapture('Camera',
                        stream_mode=DEPTH_RGB,
                        registration=openni_reg,
                        latched=False)
depthTo3d = DepthTo3d('Depth ~> 3d')
to_xyzrgb = MatToPointCloudXYZRGB('OpenCV ~> PCL')
pcl_cloud = PointCloudT2PointCloud('conversion', format=XYZRGB)
cloud_viewer = CloudViewer('Cloud Display')

plasm = ecto.Plasm()
plasm.connect(capture['K'] >> depthTo3d['K'],
              capture['image'] >> imshow('Image Display')[:],
              capture['depth'] >> depthTo3d['depth'],
              depthTo3d['points3d'] >> to_xyzrgb['points'],
              capture['image'] >> to_xyzrgb['image'],
              to_xyzrgb[:] >> pcl_cloud[:], pcl_cloud[:] >> cloud_viewer[:])

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm,
         description='Capture Kinect depth and RGB and register them.',
         locals=vars())
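doit() bundles argument parsing, scheduler selection and execution into one call. Doing the same by hand, without any command-line handling, would look roughly like the sketch below; it assumes the Singlethreaded scheduler from this generation of ecto, so treat the exact class name as an assumption.

# Sketch: run the plasm directly instead of going through ecto.opts.doit.
# ecto.schedulers.Singlethreaded is assumed here (the classic ecto scheduler).
import ecto

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter=100)  # run 100 graph iterations, then stop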
Example #3
import ecto
from ecto_opencv.highgui import imshow
from image_pipeline.io.source import create_source
from image_pipeline_conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB
from ecto.opts import run_plasm, cell_options, scheduler_options
import argparse
import ecto_ros
import sys

parser = argparse.ArgumentParser(
    description='View an OpenNI RGB-D stream as a colored point cloud.')

#ecto options
scheduler_options(parser)
args = parser.parse_args()

#create the image source cell (an OpenNISource from the image_pipeline package)
source = create_source(
    package_name='image_pipeline',
    source_type='OpenNISource')  #optionally pass keyword args here...

to_xyzrgb = MatToPointCloudXYZRGB('cv::Mat ~> pcl::Cloud')
pcl_cloud = PointCloudT2PointCloud('pcl::Cloud ~> ecto::pcl::Cloud',
                                   format=XYZRGB)
cloud_viewer = CloudViewer('Cloud Display')

plasm = ecto.Plasm()
plasm.connect(source['points3d', 'image'] >> to_xyzrgb['points', 'image'],
              to_xyzrgb[:] >> pcl_cloud[:], pcl_cloud[:] >> cloud_viewer[:])

run_plasm(args, plasm, locals=vars())
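cell_options is imported at the top of this example but never used in the excerpt. If you want a cell's parameters exposed as command-line flags, the ecto.opts factory pattern looks roughly like the sketch below; the choice of CloudViewer and the 'viewer' prefix are illustrative assumptions.

# Sketch: expose a cell's parameters on the command line with cell_options.
# Assumes the documented ecto.opts pattern: cell_options(parser, CellType, prefix)
# returns a factory that builds the cell from the parsed arguments.
import argparse
from ecto.opts import cell_options, scheduler_options
from ecto_pcl import CloudViewer

parser = argparse.ArgumentParser(description='Cloud viewer with CLI-tunable cells.')
viewer_factory = cell_options(parser, CloudViewer, prefix='viewer')
scheduler_options(parser)
args = parser.parse_args()

cloud_viewer = viewer_factory(args)  # a CloudViewer configured from the flags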