Example #1
    def connections(self, _p):
        train = self.train
        orb = self.orb
        graph = [
            self.gray_image[:] >> orb['image'],
            self.points3d[:] >> orb['points3d'],
            self.depth_mask[:] >> orb['points3d_mask']
        ]

        matcher = Matcher()
        # if self.use_lsh:
        #     matcher = self.lsh
        graph += [
            orb['descriptors'] >> matcher['test'],
            train['descriptors'] >> matcher['train'],
        ]

        # 3D pose estimation
        pose_estimation = self.pose_estimation
        graph += [
            matcher['matches'] >> pose_estimation['matches'],
            orb['points', 'points3d'] >> pose_estimation['test_2d', 'test_3d'],
            train['points', 'points3d'] >> pose_estimation['train_2d',
                                                           'train_3d']
        ]

        if self.show_matches:
            # display the matches
            match_drawer = DrawMatches()
            graph += [
                pose_estimation['matches'] >> match_drawer['matches'],
                pose_estimation['matches_mask'] >>
                match_drawer['matches_mask'],
                orb['points'] >> match_drawer['test'],
                train['points'] >> match_drawer['train'],
                self.rgb_image[:] >> match_drawer['test_image'],
                train['image'] >> match_drawer['train_image'],
                match_drawer['output'] >> imshow(name='Matches')['']
            ]

        tr = self.tr
        fps = self.fps
        pose_draw = PoseDrawer()
        graph += [
            train['R', 'T'] >> tr['R1', 'T1'],
            pose_estimation['R', 'T'] >> tr['R2', 'T2'],
            tr['R', 'T'] >> pose_draw['R', 'T'],
            pose_estimation['found'] >> pose_draw['trigger'],
            self.K_image[:] >> pose_draw['K'],
            self.rgb_image[:] >> pose_draw['image'],
            pose_draw['output'] >> fps[:],
        ]
        return graph
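Example #1 keeps the LSH branch commented out and always builds the plain Matcher, while Example #2 below constructs an LSHMatcher outright. A hedged sketch of choosing the matcher from the use_lsh flag hinted at in that comment, reusing the LSHMatcher parameters from Example #2 (the flag name and parameter values come from these snippets and are not verified against any particular ecto_opencv release):

        if self.use_lsh:
            # locality-sensitive hashing, a good fit for binary ORB descriptors
            matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1,
                                 key_size=10, radius=70)
        else:
            # default descriptor matcher
            matcher = Matcher()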
Example #2
    def connections(self):
        n_features = 4000
        train = self.train
        orb = FeatureFinder('ORB test', n_features=n_features, n_levels=3,
                            scale_factor=1.1, thresh=100, use_fast=False)
        graph = [ self.gray_image[:] >> orb['image'],
                  self.points3d[:] >> orb['points3d'],
                  self.depth_mask[:] >> orb['mask']
                ]

        matcher = LSHMatcher('LSH', n_tables=4, multi_probe_level=1,
                             key_size=10, radius=70)
        #matcher = Matcher()
        graph += [ orb['descriptors'] >> matcher['test'],
                   train['descriptors'] >> matcher['train'],
                  ]

        # 3D pose estimation
        pose_estimation = self.pose_estimation
        graph += [matcher['matches'] >> pose_estimation['matches'],
                  orb['points'] >> pose_estimation['test_2d'],
                  train['points'] >> pose_estimation['train_2d'],
                  orb['points3d'] >> pose_estimation['test_3d'],
                  train['points3d'] >> pose_estimation['train_3d'],
                  ]

        if self.show_matches:
            # display the matches
            match_drawer = DrawMatches()
            graph += [pose_estimation['matches'] >> match_drawer['matches'],
                      pose_estimation['matches_mask'] >> match_drawer['matches_mask'],
                      orb['points'] >> match_drawer['test'],
                      train['points'] >> match_drawer['train'],
                      self.rgb_image[:] >> match_drawer['test_image'],
                      train['image'] >> match_drawer['train_image'],
                      match_drawer['output'] >> imshow(name='Matches')['']
                      ]

        tr = self.tr
        fps = self.fps
        pose_draw = PoseDrawer()
        graph += [train['R', 'T'] >> tr['R1', 'T1'],
                  pose_estimation['R', 'T'] >> tr['R2', 'T2'],
                  tr['R', 'T'] >> pose_draw['R', 'T'],
                  pose_estimation['found'] >> pose_draw['trigger'],
                  self.K[:] >> pose_draw['K'],
                  self.rgb_image[:] >> pose_draw['image'],
                  pose_draw['output'] >> fps[:],
                  ]
        return graph
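Example #1 feeds the 2D and 3D points to the pose estimator with ecto's multi-key form, while this example writes the same links out one key at a time; the two notations are equivalent, with keys paired positionally. A side-by-side illustration using the cell names from the snippets above:

        # multi-key form, as in Example #1 ...
        orb['points', 'points3d'] >> pose_estimation['test_2d', 'test_3d']
        # ... is shorthand for the per-key connections used here
        orb['points'] >> pose_estimation['test_2d']
        orb['points3d'] >> pose_estimation['test_3d']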
Example #3
    def connect_debug(self):
        self.pose_drawer = PoseDrawer()

        self.graph += [
            self.observation_renderer['R'] >> MatPrinter(name='R')['mat'],
            self.observation_renderer['T'] >> MatPrinter(name='T')['mat'],
            self.observation_renderer['K'] >> MatPrinter(name='K')['mat'],
            self.observation_renderer['R', 'T',
                                      'K'] >> self.pose_drawer['R', 'T', 'K'],
            self.observation_renderer['debug_image'] >> self.pose_drawer['image'],
        ]

        self.graph += [
            self.pose_drawer['output'] >> self.display['image'],
            # self.observation_renderer['depth'] >> self.display['image'],
            # self.observation_renderer['mask'] >> self.display['image'],
        ]
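The commented-out lines switch which single output is sent to self.display; since an ecto input normally takes only one incoming connection, the usual way to watch several renderer outputs at once is one imshow window per output, as the other examples here do. A hedged sketch (window names are illustrative, and the ['image'] input key follows the imshow usage in Example #4):

        self.graph += [
            self.observation_renderer['debug_image'] >> imshow(name='debug')['image'],
            self.observation_renderer['depth'] >> imshow(name='depth')['image'],
            self.observation_renderer['mask'] >> imshow(name='mask')['image'],
        ]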
Example #4
rows = 4
cols = 5
square_size = 0.025  # 2.5 cm
pattern_type = CHESSBOARD

rgb2gray = cvtColor(flag=Conversion.RGB2GRAY)

detector = PatternDetector(rows=rows,
                           cols=cols,
                           pattern_type=pattern_type,
                           square_size=square_size)

pattern_drawer = PatternDrawer(rows=rows, cols=cols)
camera_intrinsics = CameraIntrinsics(camera_file=args.camera)
poser = FiducialPoseFinder()
pose_drawer = PoseDrawer()
plasm = ecto.Plasm()
plasm.connect(
    image_reader['image'] >> (pattern_drawer['input'], rgb2gray['image']),
    rgb2gray['image'] >> detector['input'],
    detector['ideal', 'out', 'found'] >> poser['ideal', 'points', 'found'],
    camera_intrinsics['K'] >> poser['K'],
    detector['out', 'found'] >> pattern_drawer['points', 'found'],
    poser['R', 'T'] >> pose_drawer['R', 'T'],
    poser['R'] >> MatPrinter(name='R')['mat'],
    poser['T'] >> MatPrinter(name='T')['mat'],
    pattern_drawer['out'] >> pose_drawer['image'],
    camera_intrinsics['K'] >> pose_drawer['K'],
    pose_drawer['output'] >> imshow(name='Pose', waitKey=0)['image'],
)
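The script stops after wiring the plasm and never runs it. A minimal sketch of the missing execution step, assuming the classic ecto scheduler API (ecto also offers a run_plasm helper in ecto.opts for option-driven scripts); one iteration converts the image, detects the pattern, then estimates and draws the pose, and with waitKey=0 the Pose window should wait for a key press:

sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter=1)  # run the whole graph once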