def configure(self, p, i, o):
     """Configure hook: build the cells that load a stored template.

     Instantiates one MatReader per serialized matrix (2d points, 3d
     points, descriptors, rotation R, translation T) found under
     ``p.directory`` and an imread cell for the train image.

     p: parameter tendrils; only ``p.directory`` is read here.
     i, o: input/output tendrils (unused in this hook).
     """
     directory = p.directory
     for name in ('points', 'points3d', 'descriptors', 'R', 'T'):
          setattr(self, name, MatReader(filename='%s/%s.yaml' % (directory, name)))
     self.image = imread(image_file='%s/train.png' % directory)
# Example #2 (score: 0)
 def configure(self, p, i, o):
     """Configure hook: build the cells that load a stored template.

     Creates a MatReader per serialized matrix (2d points, 3d points,
     descriptors, rotation R, translation T) under ``p.directory`` and
     an imread cell for the train image.

     p: parameter tendrils; only ``p.directory`` is read here.
     i, o: input/output tendrils (unused in this hook).
     """
     self.points = MatReader(filename='%s/points.yaml' % p.directory)
     self.points3d = MatReader(filename='%s/points3d.yaml' % p.directory)
     self.descriptors = MatReader(filename='%s/descriptors.yaml' % p.directory)
     self.R = MatReader(filename='%s/R.yaml' % p.directory)
     self.T = MatReader(filename='%s/T.yaml' % p.directory)
     self.image = imread(image_file='%s/train.png' % p.directory)
# Example #3 (score: 0)
def match(options,
        scale=1.0,
        train_scale=1 / 3.0,
        scale_factor=1.05,
        n_levels=5,
        n_features_train=10000,
        n_features_test=1000,
        match_distance=120,
    ):
    """Match ORB features between a train and a test image via an ecto plasm.

    Pipeline: load both images as grayscale, downscale the train image,
    extract ORB keypoints/descriptors on both sides, match descriptors,
    refine the matches (MatchRefinement), draw the refined matches to
    ``<output>/<train>_<test>.png`` and feed everything to MatchEval.

    options: parsed CLI options; must carry ``train``, ``test`` and
        ``output`` path attributes (``options`` is also passed through
        to run_plasm).
    scale: initial scale; used only for the per-level printout below.
    train_scale: downscale factor applied to the train image.
    scale_factor: ORB pyramid scale factor (also drives the printout).
    n_levels: number of ORB pyramid levels on the train side.
    n_features_train, n_features_test: ORB feature budget per side.
    match_distance: distance threshold given to MatchRefinement.
    """

    if not os.path.exists(options.output):
        os.makedirs(options.output)

    # Both images are loaded as grayscale for ORB extraction.
    train = imread('Train image', image_file=options.train,
                   mode=ImageMode.GRAYSCALE)
    test = imread('Test image', image_file=options.test,
            mode=ImageMode.GRAYSCALE)
    # Basenames without extension, used to name the output PNG.
    train_name, _extension = os.path.splitext(os.path.basename(options.train))
    test_name, _extension = os.path.splitext(os.path.basename(options.test))

    # Print the effective scale of each pyramid level.
    print 'Scale %d is %f' % (0, scale)
    for i in range(n_levels - 1):
        scale *= 1.0 / scale_factor
        print 'Scale %d is %f' % (i + 1, scale)
    orb_train = ORB(n_features=n_features_train,
            n_levels=n_levels, scale_factor=scale_factor)
    orb_test = ORB(n_features=n_features_test, n_levels=1)

    plasm = ecto.Plasm()
    # NOTE(review): this rebinding shadows the float 'scale' parameter
    # used in the printout above; from here on 'scale' is the Scale cell
    # that downsamples the train image.
    scale = Scale(factor=train_scale, interpolation=AREA)
    plasm.connect(train['image'] >> scale['image'])

    # Hook ORB up to the downscaled train image and the raw test image.
    train_pts = hookup_orb(plasm, scale, orb_train)
    test_pts = hookup_orb(plasm, test, orb_test)

    # Match train descriptors against test descriptors.
    matcher = Matcher()
    plasm.connect(orb_train['descriptors'] >> matcher['train'],
                 orb_test['descriptors'] >> matcher['test'],
                 )

    # Refine the raw matches, then convert the result to a matrix form.
    matchesMat = MatchesToMat()
    h_est = MatchRefinement(match_distance=match_distance)
    plasm.connect(train_pts['points'] >> h_est['train'],
                  test_pts['points'] >> h_est['test'],
                  matcher['matches'] >> h_est['matches'],
                  h_est['matches'] >> matchesMat['matches']
                  )

    # Draw the matches over both images and save to <output>/<train>_<test>.png.
    match_draw = DrawMatches()
    output_base = os.path.join(options.output,
            '%(train)s_%(test)s' %
            dict(train=train_name, test=test_name))
    match_image = output_base + '.png'
    image_saver = ImageSaver(filename=match_image)

    plasm.connect(train_pts['points'] >> match_draw['train'],
                  test_pts['points'] >> match_draw['test'],
                  h_est['matches'] >> match_draw['matches'],
                  h_est['matches_mask'] >> match_draw['matches_mask'],
                  scale['image'] >> match_draw['train_image'],
                  test['image'] >> match_draw['test_image'],
                  match_draw[:] >> image_saver[:]
                  )

    # Evaluate the refined matches; results land in the output directory.
    match_eval = MatchEval(output=options.output)
    plasm.connect(matchesMat['matches'] >> match_eval['matches'],
                  train_pts['points'] >> match_eval['train'],
                  test_pts['points'] >> match_eval['test'],
                  h_est['matches_mask'] >> match_eval['inliers'],
                  )
    run_plasm(options, plasm, locals=vars())
# Example #4 (score: 0)
def do_projector():
    """Run the projector demo pipeline.

    Subscribes to synchronized RGB and depth image topics and, depending
    on the hard-coded ``case`` selector below, warps an image according
    to a fiducial pose (case 0), the depth map (case 1), or a plane
    fitted to the depth (case 2), displaying intermediate results in
    highgui windows. Blocks in the scheduler until execution ends.
    """
    options = parse_options()

    # define the input: RGB + depth subscribers fed to a synchronizer
    subs = dict(image=ImageSub(topic_name='camera/rgb/image_color', queue_size=1),
                depth=ImageSub(topic_name='camera/depth/image', queue_size=1),
                #depth_info=CameraInfoSub(topic_name='camera/depth/camera_info', queue_size=0),
                #image_info=CameraInfoSub(topic_name='camera/rgb/camera_info', queue_size=0),
             )

    sync = ecto_ros.Synchronizer('Synchronizator', subs=subs)

    # ROS image messages -> cv matrices, plus color conversions.
    im2mat_rgb = ecto_ros.Image2Mat()
    im2mat_depth = ecto_ros.Image2Mat()
    # NOTE(review): 'invert' and 'calibrator' are constructed but never
    # wired into the graph anywhere in this function.
    invert = imgproc.BitwiseNot()
    # Conversion codes 4 and 7 are opaque here; the cell labels say
    # 'bgr -> rgb' and 'bgr -> gray' -- confirm against imgproc docs.
    brg2rgb = imgproc.cvtColor('bgr -> rgb', flag=4)
    bgr2gray = imgproc.cvtColor('bgr -> gray', flag=7)

    camera_info = calib.CameraIntrinsics('Camera Info', camera_file="camera.yml")

    calibrator = projector.Calibrator()
    plasm = ecto.Plasm()
    offset_x = 0

    # All imshow cells share one strand (s1).
    s1 = ecto.Strand()
    main_display = highgui.imshow("rgb show", name="rgb", waitKey=5, strand=s1)
    # Base graph: show RGB and depth, and split RGB into color/gray paths.
    graph = [
                sync["image"] >> im2mat_rgb["image"],
                im2mat_rgb["image"] >> main_display[:],
                sync["depth"] >> im2mat_depth["image"],
                im2mat_depth["image"] >> highgui.imshow("depth show", name="depth", waitKey= -1, strand=s1)[:],
                im2mat_rgb["image"] >> (brg2rgb["input"], bgr2gray["input"]),
#                sync["image","depth"] >> pattern_detection['image', 'depth'],
#                pattern_detection['points'] >> projection_estimator['points']
            ]

    print "Press 's' to capture a view."

    # add the display of the pattern
    # NOTE(review): 'video_display' is created but never used below.
    video_display = highgui.imshow('pattern',
                               name='video_cap', waitKey=2, maximize=False, autoSize=True)
    # Hard-coded mode selector; only one branch is ever active.
    case = 2
    if case == 0:
        # Fiducial-driven warping: detect the circle-grid pattern pose
        # and warp relative to it.
        offset_x = -.25
        pose_from_fiducial = PoseFromFiducial(plasm,
                                        rows=5, cols=3,
                                        pattern_type="acircles",
                                        square_size=0.04,
                                        offset_x=offset_x,
                                        debug=DEBUG)
        # deal with fiducial markers
        graph += [ brg2rgb["out"] >> pose_from_fiducial['color_image'],
                bgr2gray["out"] >> pose_from_fiducial['image'],
                camera_info['K'] >> pose_from_fiducial['K'],
                pose_from_fiducial['debug_image'] >> highgui.imshow("pattern show", name="pattern", waitKey= -1, strand=s1)[:],
                ]
        # Deal with the warping
        warper = projector.FiducialWarper(projection_file='projector_calibration.yml',
        radius=0.10)
        graph += [pose_from_fiducial['R', 'T', 'found'] >> warper['R', 'T', 'found'],
              warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
              ]
    elif case == 1:
        # Depth-driven warping: warp straight from the depth image.
        warper = projector.DepthWarper(projection_file='projector_calibration.yml')
        graph += [camera_info['K'] >> warper['K'],
                  im2mat_depth["image"] >> warper['depth'],
                  warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
                  ]
    elif case == 2:
        # Deal with the warping: fit a plane to the depth image, draw its
        # pose on the RGB frame, and warp a static image (lena.jpg) onto it.
        warper = projector.ImageWarper(projection_file='projector_calibration.yml', offset_x=0,offset_y=0)
        pose_from_plane = projector.PlaneFitter()
        pose_draw = calib.PoseDrawer('Plane Pose Draw')
        graph += [im2mat_depth["image"] >> pose_from_plane['depth'],
                  camera_info['K'] >> pose_from_plane['K'],
                  pose_from_plane['R', 'T'] >> warper['R', 'T'],
                  im2mat_rgb["image"] >> (pose_draw['image'],),
                  highgui.imread(image_file='lena.jpg')["image"] >> (warper['image'],),
                  camera_info['K'] >> pose_draw['K'],
                  pose_from_plane['R', 'T'] >> pose_draw['R', 'T'],
                  pose_draw['output'] >> highgui.imshow("pose", name="pose", waitKey= -1, strand=s1)[:],
                  warper['output'] >> highgui.imshow("warped image", name="warped", waitKey= -1, strand=s1)[:],
                  ]

    plasm.connect(graph)

    # display DEBUG data if needed
    #if DEBUG:
    #    print plasm.viz()
    #    ecto.view_plasm(plasm)

    # execute the pipeline
    sched = ecto.schedulers.Singlethreaded(plasm)
    sched.execute()
# Example #5 (score: 0)
    # NOTE(review): the enclosing `def` header for this block is missing
    # from this chunk; the module-level call `options = parse_args()`
    # below suggests this is the body of `def parse_args():`.
    import argparse
    parser = argparse.ArgumentParser(description='Test orb on images.')
    # NOTE(review): '-i,--input' is a single option string containing a
    # comma, not the pair ('-i', '--input'); lookups work only via the
    # explicit dest. Likely intended as two separate flags -- confirm.
    parser.add_argument('-i,--input', dest='input',
                        help='The input dir. %(default)s', default='./images')
    parser.add_argument('-o,--output', dest='output', type=str,
                        help='The output directory for this template. Default: %(default)s', default='./')
    # Expose the ORB cell's parameters as CLI flags; returns a factory
    # that builds the cell from the parsed options.
    factory = cell_options(parser, ORB, 'ORB')
    scheduler_options(parser.add_argument_group('Scheduler'), default_niter=1)
    options = parser.parse_args()
    # Force a single iteration regardless of the scheduler flag above.
    options.niter = 1
    options.orb_factory = factory
    return options

options = parse_args()

# Read the template image and stash a copy as the canonical train image.
# NOTE(review): the output directory is only created at the bottom of
# this chunk, after shutil.copy has already written into it -- the copy
# fails for a fresh output path unless it is created elsewhere first;
# confirm ordering against the full script.
image = imread(image_file=options.input)
shutil.copy(options.input, os.path.join(options.output, 'train.png'))
orb_m = options.orb_factory(options)
# NOTE(review): 'rgb2gray' is constructed but never connected in the
# graph below.
rgb2gray = cvtColor (flag=Conversion.RGB2GRAY)
kpts2mat = KeypointsToMat()
ptsTo3d = PointsTo3d(scale=0.0254 / 100) #100 dpi ~> 25.4 mm/ 100 px
# Pipeline: image -> ORB -> keypoints-as-matrix -> 3d points.
plasm = ecto.Plasm()
plasm.connect(image['image'] >> orb_m['image'],
              orb_m['keypoints'] >> kpts2mat['keypoints'],
              kpts2mat['points'] >> ptsTo3d['points']
              )

if not os.path.exists(options.output):
    print 'making ', options.output
    os.makedirs(options.output)