Esempio n. 1
0
 def __init__(self, plasm):
     """Wire up the cells of this BlackBox: camera capture, grayscale
     conversion, asymmetric-circle-grid detection, and pose estimation
     with on-screen display.

     Parameters
     ----------
     plasm : ecto.Plasm
         The plasm this BlackBox is attached to (forwarded to the base
         class initializer).
     """
     ecto.BlackBox.__init__(self, plasm)

     # Image source plus an FPS overlay for the display path.
     self.video_cap = highgui.VideoCapture(video_device=0)
     self.fps = highgui.FPSDrawer()

     # Detection runs on grayscale input; flag=7 selects the
     # rgb -> gray conversion (see the cell's label).
     self.rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)

     # Find a 7x3 asymmetric-circles calibration pattern
     # (0.03 m spacing) and a matching drawer for visual feedback.
     self.circle_detector = calib.PatternDetector(
         'Dot Detector', rows=7, cols=3, pattern_type="acircles",
         square_size=0.03)
     self.circle_drawer = calib.PatternDrawer('Circle Draw', rows=7, cols=3)
     self.circle_display = highgui.imshow(
         'Pattern show', name='Pattern', waitKey=2, maximize=True)

     # Pose estimation from the detected fiducial, drawn onto the image;
     # intrinsics are loaded from camera.yml on disk.
     self.pose_calc = calib.FiducialPoseFinder('Pose Calc')
     self.pose_draw = calib.PoseDrawer('Pose Draw')
     self.camera_info = calib.CameraIntrinsics('Camera Info',
                                               camera_file="camera.yml")
Esempio n. 2
0
def do_ecto(device_id=0, frame_id='base'):
    """Capture frames from a local camera and publish them as ROS images.

    Parameters
    ----------
    device_id : int
        OpenCV video device index to open (default 0).
    frame_id : str
        ROS frame id stamped on each outgoing image (default 'base').

    Blocks inside the scheduler's execute loop; does not return a value.
    """
    # Cells: camera source -> cv::Mat-to-ROS-image converter -> publisher.
    capture = highgui.VideoCapture('Video Camera', video_device=device_id)
    converter = ecto_ros.Mat2Image(frame_id=frame_id, encoding='bgr8')
    publisher = ImagePub("image pub", topic_name='image')

    # Connect the three cells in a straight pipeline and run it on a
    # threadpool scheduler.
    plasm = ecto.Plasm()
    plasm.connect([
        capture["image"] >> converter["image"],
        converter["image"] >> publisher["input"],
    ])
    ecto.schedulers.Threadpool(plasm).execute()
Esempio n. 3
0
def make_graph(k, sigma, debug):
    """Build an ecto connection list chaining ``k + 1`` Gaussian blurs.

    A camera source feeds the first blur; each subsequent blur consumes
    the previous one's output. The final result is routed through an
    FPS drawer into an imshow window.

    Parameters
    ----------
    k : int
        Number of *additional* blur stages appended after the first.
    sigma : float
        Gaussian sigma used by every blur stage.
    debug : object
        Accepted for interface compatibility but currently unused.

    Returns
    -------
    list
        Connection expressions suitable for ``ecto.Plasm.connect``.
    """
    source = highgui.VideoCapture(video_device=0)

    # Seed the chain with the first blur fed by the camera.
    current = imgproc.GaussianBlur(sigma=sigma)
    edges = [source['image'] >> current['input']]

    # Append k further blur stages, each consuming the previous output.
    for _ in range(k):
        blur = imgproc.GaussianBlur(sigma=sigma)
        edges.append(current['out'] >> blur['input'])
        current = blur

    # Tail: overlay FPS on the fully blurred stream and display it.
    fps = highgui.FPSDrawer()
    display = highgui.imshow("megagauss", name="megagauss", waitKey=10)
    edges.append(current['out'] >> fps[:])
    edges.append(fps[:] >> display[:])
    return edges
Esempio n. 4
0
#!/usr/bin/env python
# add pose estimation
import ecto
from ecto_opencv import highgui, calib, imgproc

# Single-threaded plasm: cells are connected below (the connect call
# continues past this excerpt) and executed by this scheduler.
plasm = ecto.Plasm()
sched = ecto.schedulers.Singlethreaded(plasm)

# Camera source (device 0) and an FPS overlay for the display path.
video_cap = highgui.VideoCapture(video_device=0)

fps = highgui.FPSDrawer()

# Detection runs on grayscale; flag=7 selects the rgb -> gray
# conversion (see the cell's label).
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=7)

# Detect a 7x3 asymmetric-circles calibration pattern with 0.03 m
# spacing, plus a matching drawer for visual feedback.
circle_detector = calib.PatternDetector(rows=7,
                                        cols=3,
                                        pattern_type="acircles",
                                        square_size=0.03)
circle_drawer = calib.PatternDrawer(rows=7, cols=3)
circle_display = highgui.imshow('Pattern show',
                                name='Pattern',
                                waitKey=2,
                                maximize=True)

# Estimate the fiducial's pose and draw it onto the image.
pose_calc = calib.FiducialPoseFinder('Pose Calc')

pose_draw = calib.PoseDrawer('Pose Draw')

# Camera intrinsics are loaded from camera.yml in the working directory.
camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

plasm.connect(
Esempio n. 5
0
#!/usr/bin/env python
# abstract the input.
import ecto
from ecto_opencv import highgui, calib, imgproc, cv_bp as cv
from object_recognition.observations import *
from ecto_object_recognition import capture

plasm = ecto.Plasm()

# Camera source at 640x480; intrinsics come from camera.yml on disk.
video_cap = highgui.VideoCapture(video_device=0, width=640, height=480)
camera_intrinsics = calib.CameraIntrinsics(camera_file='camera.yml')

# Pose estimator for a 5x3 asymmetric-circles grid (0.04 m spacing);
# OpposingDotPoseEstimator is provided by the star import above.
poser = OpposingDotPoseEstimator(rows=5, cols=3,
                                 pattern_type=calib.ASYMMETRIC_CIRCLES_GRID,
                                 square_size=0.04, debug=True
                                 )

# NOTE(review): the variable is named bgr2rgb but the label and flag say
# RGB2BGR, and this cell is not connected in the graph below — confirm
# whether it is dead code or used elsewhere.
bgr2rgb = imgproc.cvtColor('rgb -> bgr', flag=imgproc.RGB2BGR)
rgb2gray = imgproc.cvtColor('rgb -> gray', flag=imgproc.RGB2GRAY)
display = highgui.imshow('Poses', name='Poses')

# Pipeline: grayscale frames feed the detector, the original color frame
# and the camera matrix K feed the pose estimator, and its debug image
# is shown in the 'Poses' window.
plasm.connect(
         video_cap['image'] >> rgb2gray[:], 
         rgb2gray[:] >> poser['image'],
         video_cap['image'] >> poser['color_image'],
         poser['debug_image'] >> display['image'],
         camera_intrinsics['K'] >> poser['K']
         )

if __name__ == '__main__':
    from ecto.opts import doit