Example 1
 def renderer(self):
     """Build a Renderer over this dataset's PLY models.

     Uses one sample per pixel and caches temporary vertex data in the
     current directory. The first entry of ``self.vertex_scale`` is used
     as the vertex scale (e.g. 1000 for models stored in meters).
     """
     scale = float(self.vertex_scale[0])
     return meshrenderer.Renderer(self.ply_model_paths,
                                  samples=1,
                                  vertex_tmp_store_folder='.',
                                  vertex_scale=scale)
Example 2
    def __init__(self,
                 models_cad_files,
                 vertex_tmp_store_folder,
                 vertex_scale,
                 width,
                 height,
                 K,
                 augmenters,
                 vocdevkit_path,
                 min_num_objects_per_scene,
                 max_num_objects_per_scene,
                 near_plane=10,
                 far_plane=2000,
                 min_n_views=1000,
                 radius=650,
                 obj_ids=None,
                 model_type='reconst'):
        """Set up a synthetic-scene generator with a mesh renderer.

        Args:
            models_cad_files: list of model file paths to render.
            vertex_tmp_store_folder: folder where the renderer stores
                temporary preprocessed vertex data.
            vertex_scale: scale applied to model vertices (e.g. 1000 for
                models stored in meters).
            width, height: output image size in pixels.
            K: camera intrinsics matrix.
            augmenters: image augmentation pipeline (stored as-is).
            vocdevkit_path: folder scanned (non-recursively) for *.jpg and
                *.png background images.
            min_num_objects_per_scene, max_num_objects_per_scene: bounds on
                the number of objects placed per generated scene.
            near_plane, far_plane: rendering clip planes.
            min_n_views: minimum number of viewpoints sampled on the sphere.
            radius: camera distance used for view sampling.
            obj_ids: optional sequence of object ids; stored via np.array
                (NOTE: becomes np.array(None) when omitted — callers that
                rely on obj_ids should pass it explicitly).
            model_type: 'reconst' selects the Phong renderer, 'cad' the
                plain renderer; any other value aborts the process.
        """
        self._models_cad_files = models_cad_files
        self._width = width
        self._height = height
        self._radius = radius
        self._K = K
        self._augmenters = augmenters
        self._min_num_objects_per_scene = min_num_objects_per_scene
        self._max_num_objects_per_scene = max_num_objects_per_scene
        self._near_plane = near_plane
        self._far_plane = far_plane
        self.obj_ids = np.array(obj_ids)

        # Collect background images from the top level of vocdevkit_path.
        self._voc_imgs = glob.glob(os.path.join(
            vocdevkit_path, '*.jpg')) + glob.glob(
                os.path.join(vocdevkit_path, '*.png'))
        # BUG FIX: Python 2 print statements replaced with single-argument
        # print() calls, which behave identically under Python 2 and 3.
        print(len(self._voc_imgs))
        if model_type == 'reconst':
            self._renderer = mr_phong.Renderer(
                self._models_cad_files,
                1,
                vertex_tmp_store_folder=vertex_tmp_store_folder,
                vertex_scale=vertex_scale)
        elif model_type == 'cad':
            self._renderer = mr.Renderer(
                self._models_cad_files,
                1,
                vertex_tmp_store_folder=vertex_tmp_store_folder,
                vertex_scale=vertex_scale)
        else:
            print('unknown model_type, %s' % model_type)
            exit()

        # Pre-sample camera poses covering the full azimuth circle and the
        # full elevation range at the given radius.
        azimuth_range = (0, 2 * math.pi)
        elev_range = (-0.5 * math.pi, 0.5 * math.pi)
        self.all_views, _ = view_sampler.sample_views(min_n_views, radius,
                                                      azimuth_range,
                                                      elev_range)
Example 3
 def renderer(self):
     """Build the offscreen mesh renderer configured in ``self._kw``.

     ``self._kw['model']`` selects the implementation: 'cad' uses the
     plain renderer, 'reconst' the Phong renderer; any other value
     reports an error and terminates the process.
     """
     from auto_pose.meshrenderer import meshrenderer, meshrenderer_phong
     if self._kw['model'] == 'cad':
         renderer = meshrenderer.Renderer([self._kw['model_path']],
                                          int(self._kw['antialiasing']),
                                          self.dataset_path,
                                          float(self._kw['vertex_scale']))
     elif self._kw['model'] == 'reconst':
         renderer = meshrenderer_phong.Renderer(
             [self._kw['model_path']], int(self._kw['antialiasing']),
             self.dataset_path, float(self._kw['vertex_scale']))
     else:
         # BUG FIX: this branch was a bare string literal (a no-op), so
         # the process exited without any error message. Print it.
         print('Error: neither cad nor reconst in model path!')
         exit()
     return renderer
Example 4
    def renderer(self):
        """Build the offscreen mesh renderer configured in ``self._kw``.

        ``self._kw['model']`` selects the implementation: 'cad' uses the
        plain renderer, 'reconst' the Phong renderer; any other value
        prints an error and terminates the process.

        Returns:
            A ``meshrenderer.Renderer`` or ``meshrenderer_phong.Renderer``
            over the configured model path(s).
        """
        from auto_pose.meshrenderer import meshrenderer, meshrenderer_phong

        if self._kw['model'] == 'cad':
            # NOTE(review): eval() on a config value — assumes the config
            # file is trusted; 'model_path' presumably holds a Python list
            # literal of paths. TODO confirm; ast.literal_eval would be
            # the safer choice here.
            renderer = meshrenderer.Renderer(
               eval(str(self._kw['model_path'])),
               int(self._kw['antialiasing']),
               self.dataset_path,
               float(self._kw['vertex_scale'])
            )
        elif self._kw['model'] == 'reconst':
            # Same eval() caveat as above; temp folder and scale are passed
            # by keyword here, positionally in the 'cad' branch.
            renderer = meshrenderer_phong.Renderer(
               eval(str(self._kw['model_path'])),
               int(self._kw['antialiasing']),
               vertex_tmp_store_folder = self.dataset_path,
               vertex_scale = float(self._kw['vertex_scale'])
            )
        else:
            print('Error: neither cad nor reconst in model path!')
            exit()
        return renderer
 def renderer(self):
     """Build the offscreen mesh renderer configured in ``self._kw``.

     'cad' selects the plain renderer, 'reconst' the Phong renderer;
     any other value reports an error and terminates the process. The
     framebuffer size is taken from the 'render_dims' config entry.
     """
     from auto_pose.meshrenderer import meshrenderer, meshrenderer_phong
     # NOTE(review): eval() of a config string — assumes trusted config;
     # 'render_dims' presumably holds a tuple literal like "(1280, 720)".
     # TODO confirm; ast.literal_eval would be safer.
     render_dims = list(map(int, eval(self._kw['render_dims'])))
     if self._kw['model'] == 'cad':
         renderer = meshrenderer.Renderer(
            [self._kw['model_path']],
            int(self._kw['antialiasing']),
            self.dataset_path,
            float(self._kw['vertex_scale']),
            max_fbo_width=render_dims[0], max_fbo_height=render_dims[1]
         )
     elif self._kw['model'] == 'reconst':
         renderer = meshrenderer_phong.Renderer(
            [self._kw['model_path']],
            int(self._kw['antialiasing']),
            self.dataset_path,
            float(self._kw['vertex_scale']),
            max_fbo_width=render_dims[0], max_fbo_height=render_dims[1]
         )
     else:
         # BUG FIX: this branch was a bare string literal (a no-op), so
         # the process exited without any error message. Print it.
         print('Error: neither cad nor reconst in model path!')
         exit()
     return renderer
 def renderer(self):
     """Return a default Renderer for this model: one sample per pixel,
     temporary vertex data in the current directory, unit vertex scale."""
     model_files = [self.model_path]
     return meshrenderer.Renderer(model_files, 1, '.', 1)
Example 7
def main():
    """Run webcam-based 6D pose estimation, optionally visualizing renders.

    Reads the test-config name and the ``-vis`` flag from the command line,
    requires the ``AE_WORKSPACE_PATH`` environment variable to be set, then
    loops over webcam frames running detection and pose estimation. With
    ``-vis``, the estimated poses are rendered and composited (green
    channel) over the camera image together with detection boxes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-test_config",
                        type=str,
                        required=False,
                        default='test_config_webcam.cfg')
    parser.add_argument("-vis", action='store_true', default=False)
    args = parser.parse_args()

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    # BUG FIX: compare against None with `is`, not `==`.
    if workspace_path is None:
        print('Please define a workspace path:')
        print('export AE_WORKSPACE_PATH=/path/to/workspace')
        exit(-1)

    # NOTE: the config file itself is parsed inside AePoseEstimator; the
    # original also read it into an unused local ConfigParser (removed).
    test_configpath = os.path.join(workspace_path, 'cfg_eval',
                                   args.test_config)

    ae_pose_est = AePoseEstimator(test_configpath)

    videoStream = WebcamVideoStream(0, ae_pose_est._width,
                                    ae_pose_est._height).start()

    if args.vis:
        from auto_pose.meshrenderer import meshrenderer

        # One PLY path per trained model, read from each training config.
        ply_model_paths = [
            str(train_args.get('Paths', 'MODEL_PATH'))
            for train_args in ae_pose_est.all_train_args
        ]

        renderer = meshrenderer.Renderer(
            ply_model_paths,
            samples=1,
            vertex_tmp_store_folder=get_dataset_path(workspace_path),
            vertex_scale=1.0)  # some models may need e.g. 1000 (mm scale)

    # Per-class BGR draw colors, cycled to cover up to 40 class ids.
    color_dict = [(0, 255, 0), (0, 0, 255), (255, 0, 0), (255, 255, 0)] * 10

    while videoStream.isActive():
        image = videoStream.read()
        if image is None or not image.any():
            print("Failed to capture webcam image")
            exit(-1)

        boxes, scores, labels = ae_pose_est.process_detection(image)

        all_pose_estimates, all_class_idcs = ae_pose_est.process_pose(
            boxes, labels, image)

        if args.vis:
            bgr, depth, _ = renderer.render_many(
                obj_ids=list(all_class_idcs),
                W=ae_pose_est._width,
                H=ae_pose_est._height,
                K=ae_pose_est._camK,
                Rs=[pose_est[:3, :3] for pose_est in all_pose_estimates],
                ts=[pose_est[:3, 3] for pose_est in all_pose_estimates],
                near=10,
                far=10000,
                random_light=False,
                phong={
                    'ambient': 0.4,
                    'diffuse': 0.8,
                    'specular': 0.3
                })

            bgr = cv2.resize(bgr, (ae_pose_est._width, ae_pose_est._height))

            # Keep only the render's green channel and composite it over
            # the camera frame wherever the render is non-zero.
            g_y = np.zeros_like(bgr)
            g_y[:, :, 1] = bgr[:, :, 1]
            im_bg = cv2.bitwise_and(image,
                                    image,
                                    mask=(g_y[:, :, 1] == 0).astype(np.uint8))
            image_show = cv2.addWeighted(im_bg, 1, g_y, 1, 0)

            # Draw detection boxes and per-class labels/scores.
            for label, box, score in zip(labels, boxes, scores):
                box = box.astype(np.int32)
                xmin, ymin, xmax, ymax = box[0], box[
                    1], box[0] + box[2], box[1] + box[3]
                print(label)
                cv2.putText(image_show, '%s : %1.3f' % (label, score),
                            (xmin, ymax + 20), cv2.FONT_ITALIC, .5,
                            color_dict[int(label)], 2)
                cv2.rectangle(image_show, (xmin, ymin), (xmax, ymax),
                              (255, 0, 0), 2)

            cv2.imshow('real', image_show)
            cv2.waitKey(1)
# Script-level visualization setup: build one renderer covering all trained
# models when -vis was requested on the command line.
if args.vis:
    from auto_pose.meshrenderer import meshrenderer

    # One PLY path per trained model, read from each training config.
    ply_model_paths = [
        str(train_args.get('Paths', 'MODEL_PATH'))
        for train_args in ae_pose_est.all_train_args
    ]
    # Model kind ('cad'/'reconst') per model; unused in the visible snippet.
    cad_reconst = [
        str(train_args.get('Dataset', 'MODEL'))
        for train_args in ae_pose_est.all_train_args
    ]

    renderer = meshrenderer.Renderer(
        ply_model_paths,
        samples=1,
        vertex_tmp_store_folder=get_dataset_path(workspace_path),
        vertex_scale=float(1))  # float(1) for some models

# Per-class BGR draw colors, cycled to cover up to 40 class ids.
color_dict = [(0, 255, 0), (0, 0, 255), (255, 0, 0), (255, 255, 0)] * 10

while camera.IsGrabbing():  #videoStream.isActive():
    grabResult = camera.RetrieveResult(5000,
                                       pylon.TimeoutHandling_ThrowException)
    if grabResult.GrabSucceeded():
        # Access the image data
        image = converter.Convert(grabResult)
        image = image.GetArray()

        boxes, scores, labels = ae_pose_est.process_detection(image)