Example #1
0
    def _create_networks(self):
        """Instantiate all sub-networks and helpers and move them to the GPU.

        Builds the generator, the background network, the HMR network, the
        SMPL renderer and an optional person-mask detector.
        """
        # 1. generator network
        self.generator = self._create_generator().cuda()

        # 2. background network: reuse the generator's own background model
        # when bg_model == 'ORIGINAL', otherwise build a dedicated bgnet
        if self._opt.bg_model == 'ORIGINAL':
            self.bgnet = self.generator.bg_model
        else:
            self.bgnet = self._create_bgnet().cuda()

        # 3. HMR network
        self.hmr = self._create_hmr().cuda()

        # 4. SMPL renderer
        self.render = SMPLRenderer(
            image_size=self._opt.image_size,
            tex_size=self._opt.tex_size,
            has_front=self._opt.front_warp,
            fill_back=False).cuda()

        # 5. optional person-mask pre-processor (None when disabled)
        self.detector = (
            PersonMaskRCNNDetector(ks=self._opt.bg_ks, threshold=0.5, to_gpu=True)
            if self._opt.has_detector else None
        )
    def _create_render(self):
        """Build and return a CPU-side SMPL renderer configured from options."""
        opts = self._opt
        return SMPLRenderer(
            map_name=opts.map_name,
            uv_map_path=opts.uv_mapping,
            tex_size=opts.tex_size,
            image_size=opts.image_size,
            fill_back=False,
            anti_aliasing=True,
            background_color=(0, 0, 0),
            has_front=False,
        )
Example #3
0
def visual(model, out_dir):
    """Render and visualize SMPL fits stored in ``out_dir``.

    Reads per-frame cameras/poses/shapes from ``smpl_infos.h5``, renders the
    SMPL mesh for each frame and pushes the rendered image, the
    silhouette-masked input and the raw input to the global ``visualizer``.

    Args:
        model: object exposing ``smpl(beta, theta, get_skin=True)`` that
            returns ``(verts, _, _)``.
        out_dir: directory containing ``smpl_infos.h5`` and a
            ``cropped_frames`` folder of ``*.png`` crops.
    """
    global visualizer

    render = SMPLRenderer(image_size=IMG_SIZE).cuda()

    # single debug texture, cloned per frame so render() cannot mutate it
    texs = render.debug_textures().cuda()[None]

    with h5py.File(osp.join(out_dir, 'smpl_infos.h5'), 'r') as reader:
        cams_crop = reader['cam_crop']
        poses = reader['pose']
        shapes = reader['shape']

        scan_image_paths = sorted(
            glob.glob(osp.join(out_dir, 'cropped_frames', '*.png')))

        # frame ids must be strictly increasing so they index the sorted
        # image list consistently
        frame_ids = list(reader['f_id'])
        assert all(a < b for a, b in zip(frame_ids, frame_ids[1:])), \
            'frame ids are not strictly increasing'

        image_paths = [scan_image_paths[f_id] for f_id in frame_ids]

        for i, im_path in enumerate(tqdm(image_paths)):
            # HWC uint8 BGR -> CHW float RGB in [0, 1]
            image = cv2.imread(im_path)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = np.transpose(image, (2, 0, 1))
            # BUGFIX: np.float was removed in NumPy 1.24; np.float64 is the
            # equivalent dtype (np.float aliased builtin float == float64)
            image = image.astype(np.float64) / 255
            image = torch.tensor(image).float()[None].cuda()

            cams = torch.tensor(cams_crop[i]).float()[None].cuda()
            pose = torch.tensor(poses[i]).float()[None].cuda()
            shape = torch.tensor(shapes[i]).float()[None].cuda()

            verts, _, _ = model.smpl(beta=shape, theta=pose, get_skin=True)
            rd_imgs, _ = render.render(cams, verts, texs.clone())
            sil = render.render_silhouettes(cams, verts)

            # zero out background pixels using the rendered silhouette
            masked_img = image * sil[:, None, :, :]

            visualizer.vis_named_img('rd_imgs', rd_imgs, denormalize=False)
            visualizer.vis_named_img('masked_img',
                                     masked_img,
                                     denormalize=False)
            visualizer.vis_named_img('imgs', image, denormalize=False)

            # throttle so each frame stays visible in the viewer
            time.sleep(1)
Example #4
0
 def _create_render(self, faces):
     """Return a GPU SMPL renderer built for the given mesh faces."""
     opt = self._opt
     renderer = SMPLRenderer(faces=faces,
                             map_name=opt.map_name,
                             uv_map_path=opt.uv_mapping,
                             tex_size=opt.tex_size,
                             image_size=opt.image_size,
                             fill_back=True,
                             anti_aliasing=True,
                             background_color=(0, 0, 0),
                             has_front_map=True).cuda()
     return renderer