Code Example #1
File: ros.py  Project: trinhtrannp/hcrdocker
    def configure(self, _p, _i, _o):
        #ROS message converters
        self._depth_converter = ecto_ros.Image2Mat()
        self._rgb_image = ecto_ros.Image2Mat(swap_rgb=True)

        #these transform the depth into something usable
        self._points3d = DepthTo3d()
Code Example #2
import ecto
from ecto_opencv.highgui import imshow
from ecto_opencv.calib import DepthTo3d
from image_pipeline import Rectifier, StereoModelLoader, DepthRegister, CameraModelToCv, CV_INTER_NN
from ecto_openni import OpenNICapture, DEPTH_RGB, DEPTH_IR, RGB, IR, IRGamma, enumerate_devices
from ecto_object_recognition.conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB

openni_reg = True
print(enumerate_devices())

capture = OpenNICapture('Camera',
                        stream_mode=DEPTH_RGB,
                        registration=openni_reg,
                        latched=False)
depthTo3d = DepthTo3d('Depth ~> 3d')
to_xyzrgb = MatToPointCloudXYZRGB('OpenCV ~> PCL')
pcl_cloud = PointCloudT2PointCloud('conversion', format=XYZRGB)
cloud_viewer = CloudViewer('Cloud Display')

plasm = ecto.Plasm()
# wire the graph: K and depth feed DepthTo3d; its 3D points plus the RGB
# image become an XYZRGB cloud that is pushed to the PCL viewer
plasm.connect(capture['K'] >> depthTo3d['K'],
              capture['image'] >> imshow('Image Display')[:],
              capture['depth'] >> depthTo3d['depth'],
              depthTo3d['points3d'] >> to_xyzrgb['points'],
              capture['image'] >> to_xyzrgb['image'],
              to_xyzrgb[:] >> pcl_cloud[:],
              pcl_cloud[:] >> cloud_viewer[:])

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, locals=vars())
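
The same plasm can also be executed without ecto.opts by driving a scheduler directly. A minimal sketch, assuming ecto's Singlethreaded scheduler API (ecto.schedulers.Singlethreaded and its execute(niter=...) method); verify the names against your ecto version:

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter=30)  # run the graph for 30 iterations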
Code Example #3
    options = parser.parse_args()

    return options


if __name__ == '__main__':
    options = parse_args()

    plasm = ecto.Plasm()

    #setup the input source, grayscale conversion
    from ecto_openni import SXGA_RES, FPS_15
    source = create_source('image_pipeline', 'OpenNISource', image_mode=SXGA_RES, image_fps=FPS_15)
    rgb2gray = cvtColor('Grayscale', flag=Conversion.RGB2GRAY)
    plane_finder = PlaneFinder(l=600, nu=100)
    depth_to_3d = DepthTo3d()

    plasm.connect(source['image'] >> rgb2gray['image'])

    # connect up the plane finder
    connections = [ source['image'] >> plane_finder['image'],
                    source['depth_raw'] >> depth_to_3d['depth'],
                    source['K'] >> depth_to_3d['K'],
                    depth_to_3d['points3d'] >> plane_finder['points3d'],
                    source['K'] >> plane_finder['K'] ]
    connections += [plane_finder['image'] >> imshow(name='hulls')[:],
                    source['image'] >> imshow(name='original')[:]]
    plasm.connect(connections)

    run_plasm(options, plasm, locals=vars())
Code Example #4
from registration import PoseEstimator3DProj, FeatureFinder

n_feats = 1000
plasm = ecto.Plasm()

#setup the input source, grayscale conversion
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=True, sync=True)
rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)

plasm.connect(capture['image'] >> rgb2gray['image'])

#convenience variable for the grayscale
img_src = rgb2gray['image']

# calculate 3d points
depthTo3d = DepthTo3d()
depthMask = DepthMask()
plasm.connect(capture['depth'] >> depthTo3d['depth'],
              capture['depth'] >> depthMask['depth'],
              capture['K'] >> depthTo3d['K']
              )

# connect up the test ORB
orb = FeatureFinder('ORB test', n_features=n_feats, n_levels=6, scale_factor=1.2, thresh=100, use_fast=False)
draw_kpts = DrawKeypoints()
fps = FPSDrawer()
disp = imshow('orb display', name='ORB', triggers=dict(save=ord('s')))

plasm.connect(img_src >> orb['image'],
              depthTo3d['points3d'] >> orb['points3d'],
#              depthMask['mask'] >> orb['mask'],
              )
Code Example #5
from registration import PoseEstimator3DProj, FeatureFinder, RotateZ

n_feats = 1000
plasm = ecto.Plasm()

#setup the input source, grayscale conversion
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=True, sync=True)
rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)

plasm.connect(capture['image'] >> rgb2gray['image'])

#convenience variable for the grayscale
img_src = rgb2gray['image']

# calculate 3d points
depthTo3d = DepthTo3d()
plasm.connect(capture['depth'] >> depthTo3d['depth'],
              capture['K'] >> depthTo3d['K'])

# connect up the test ORB
orb = FeatureFinder('ORB test',
                    n_features=n_feats,
                    n_levels=6,
                    scale_factor=1.2,
                    thresh=100,
                    use_fast=False)

plasm.connect(
    img_src >> orb['image'],
    depthTo3d['points3d'] >> orb['points3d'],
)
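
All five excerpts share the same core wiring: a camera matrix K and a depth image feed DepthTo3d, and its points3d output is what the downstream cells (point-cloud conversion, plane finding, ORB feature finding) consume. Below is a minimal sketch of that pattern assembled only from cells imported in Code Example #2; treat it as an illustrative summary rather than a tested script.

import ecto
from ecto_opencv.calib import DepthTo3d
from ecto_openni import OpenNICapture, DEPTH_RGB
from ecto_object_recognition.conversion import MatToPointCloudXYZRGB
from ecto_pcl import PointCloudT2PointCloud, CloudViewer, XYZRGB

# registered depth + RGB capture from an OpenNI device
capture = OpenNICapture(stream_mode=DEPTH_RGB, registration=True)
# depth image + camera matrix K -> per-pixel 3D points
depth_to_3d = DepthTo3d()
# OpenCV image/points -> PCL XYZRGB cloud -> interactive viewer
to_xyzrgb = MatToPointCloudXYZRGB()
pcl_cloud = PointCloudT2PointCloud(format=XYZRGB)
viewer = CloudViewer()

plasm = ecto.Plasm()
plasm.connect(capture['K'] >> depth_to_3d['K'],
              capture['depth'] >> depth_to_3d['depth'],
              depth_to_3d['points3d'] >> to_xyzrgb['points'],
              capture['image'] >> to_xyzrgb['image'],
              to_xyzrgb[:] >> pcl_cloud[:],
              pcl_cloud[:] >> viewer[:])

if __name__ == '__main__':
    from ecto.opts import doit
    doit(plasm, locals=vars())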