Example #1
    def define_model(self):
        opts = self.opts
        # ----------
        # Options
        # ----------
        self.symmetric = opts.symmetric
        anno_sfm_path = osp.join(opts.cub_cache_dir, 'sfm', 'anno_train.mat')
        anno_sfm = sio.loadmat(anno_sfm_path,
                               struct_as_record=False,
                               squeeze_me=True)
        if opts.sphere_initial:
            sfm_mean_shape = mesh.create_sphere(3)
        else:
            sfm_mean_shape = (np.transpose(anno_sfm['S']),
                              anno_sfm['conv_tri'] - 1)

        img_size = (opts.img_size, opts.img_size)
        self.model = mesh_net.MeshNet(img_size,
                                      opts,
                                      nz_feat=opts.nz_feat,
                                      num_kps=opts.num_kps,
                                      sfm_mean_shape=sfm_mean_shape)

        if opts.num_pretrain_epochs > 0:
            self.load_network(self.model, 'pred', opts.num_pretrain_epochs)

        self.model = self.model.cuda(device=opts.gpu_id)

        # Data structures to use for triangle priors.
        edges2verts = self.model.edges2verts
        # B x E x 4
        edges2verts = np.tile(np.expand_dims(edges2verts, 0),
                              (opts.batch_size, 1, 1))
        self.edges2verts = Variable(
            torch.LongTensor(edges2verts).cuda(device=opts.gpu_id),
            requires_grad=False)
        # For rendering.
        faces = self.model.faces.view(1, -1, 3)
        self.faces = faces.repeat(opts.batch_size, 1, 1)
        # opts.renderer = "smr"
        self.renderer = NeuralRenderer(
            opts.img_size) if opts.renderer == "nmr" else SoftRenderer(
                opts.img_size)
        self.renderer_predcam = NeuralRenderer(
            opts.img_size) if opts.renderer == "nmr" else SoftRenderer(
                opts.img_size)  # for camera loss via projection

        # Need separate NMR for each fwd/bwd call.
        if opts.texture:
            self.tex_renderer = NeuralRenderer(
                opts.img_size) if opts.renderer == "nmr" else SoftRenderer(
                    opts.img_size)
            # Only use ambient light for tex renderer
            self.tex_renderer.ambient_light_only()

        # For visualization
        self.vis_rend = bird_vis.VisRenderer(opts.img_size,
                                             faces.data.cpu().numpy())

        return
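
A minimal sketch of the B x E x 4 edges2verts batching used above, with hypothetical toy sizes in place of the real mesh topology:

import numpy as np
import torch

batch_size, num_edges = 4, 8                             # hypothetical sizes
edges2verts = np.random.randint(0, 15, (num_edges, 4))   # E x 4 vertex indices

# Tile to B x E x 4 so every sample in the batch shares the same topology.
edges2verts = np.tile(np.expand_dims(edges2verts, 0), (batch_size, 1, 1))
edges2verts = torch.LongTensor(edges2verts)              # requires_grad defaults to False
print(edges2verts.shape)                                 # torch.Size([4, 8, 4])
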
Example #2
    def __init__(self, opts):
        self.opts = opts

        self.symmetric = opts.symmetric

        img_size = (opts.img_size, opts.img_size)
        print('Setting up model..')
        self.model = mesh_net.MeshNet(img_size, opts, nz_feat=opts.nz_feat)

        self.load_network(self.model, 'pred', self.opts.num_train_epoch)
        self.model.eval()
        self.model = self.model.cuda(device=self.opts.gpu_id)

        # TODO (junzhe): option to select the renderer
        print('self.opts.renderer_opt:', self.opts.renderer_opt)
        if self.opts.renderer_opt == 'nmr':
            from nnutils.nmr import NeuralRenderer
        elif self.opts.renderer_opt == 'nmr_kaolin':
            from nnutils.nmr_kaolin import NeuralRenderer
        elif self.opts.renderer_opt == 'dibr_kaolin':
            from nnutils.dibr_kaolin import NeuralRenderer
        else:
            raise NotImplementedError

        self.renderer = NeuralRenderer(opts.img_size,
                                       uv_sampler=self.model.uv_sampler)

        if opts.texture:
            self.tex_renderer = NeuralRenderer(
                opts.img_size, uv_sampler=self.model.uv_sampler)
            # Only use ambient light for tex renderer
            self.tex_renderer.ambient_light_only()

        if opts.use_sfm_ms:
            anno_sfm_path = osp.join(opts.cub_cache_dir, 'sfm',
                                     'anno_testval.mat')
            anno_sfm = sio.loadmat(anno_sfm_path,
                                   struct_as_record=False,
                                   squeeze_me=True)
            sfm_mean_shape = torch.Tensor(np.transpose(
                anno_sfm['S'])).cuda(device=opts.gpu_id)
            self.sfm_mean_shape = Variable(sfm_mean_shape, requires_grad=False)
            self.sfm_mean_shape = self.sfm_mean_shape.unsqueeze(0).repeat(
                opts.batch_size, 1, 1)
            sfm_face = torch.LongTensor(anno_sfm['conv_tri'] -
                                        1).cuda(device=opts.gpu_id)
            self.sfm_face = Variable(sfm_face, requires_grad=False)
            faces = self.sfm_face.view(1, -1, 3)
        else:
            # For visualization
            faces = self.model.faces.view(1, -1, 3)
        self.faces = faces.repeat(opts.batch_size, 1, 1)
        self.vis_rend = bird_vis.VisRenderer(opts.img_size,
                                             faces.data.cpu().numpy(),
                                             self.opts,
                                             uv_sampler=self.model.uv_sampler)
        self.vis_rend.set_bgcolor([1., 1., 1.])

        self.resnet_transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
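
The resnet_transform above is the standard ImageNet normalization from torchvision; a minimal standalone usage sketch (the input tensor here is hypothetical):

import torch
import torchvision

resnet_transform = torchvision.transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

# Normalize expects a C x H x W tensor already scaled to [0, 1].
img = torch.rand(3, 256, 256)
img_norm = resnet_transform(img)
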
Example #3
    def define_model(self):
        opts = self.opts
        # import pdb; pdb.set_trace()

        # Options
        self.symmetric = opts.symmetric
        anno_sfm_path = os.path.join(opts.cub_cache_dir, 'sfm',
                                     'anno_train.mat')
        anno_sfm = sio.loadmat(anno_sfm_path,
                               struct_as_record=False,
                               squeeze_me=True)
        sfm_mean_shape = (np.transpose(anno_sfm['S']),
                          anno_sfm['conv_tri'] - 1)

        img_size = (opts.img_size, opts.img_size)
        self.model = mesh_net.MeshNet(img_size,
                                      opts,
                                      nz_feat=opts.nz_feat,
                                      num_kps=opts.num_kps,
                                      sfm_mean_shape=sfm_mean_shape)

        if opts.num_pretrain_epochs > 0:
            self.load_network(self.model, 'pred', opts.num_pretrain_epochs)

        self.model = self.model.to(self.device)

        # Data structures to use for triangle priors.
        edges2verts = self.model.edges2verts
        # B x E x 4
        edges2verts = np.tile(np.expand_dims(edges2verts, 0),
                              (opts.batch_size, 1, 1))
        self.edges2verts = Variable(
            torch.LongTensor(edges2verts).to(self.device),
            requires_grad=False)
        # For rendering.
        faces = self.model.faces.view(1, -1, 3)
        self.faces = faces.repeat(opts.batch_size, 1, 1)
        self.renderer = NeuralRenderer(opts.img_size)
        self.renderer_predcam = NeuralRenderer(
            opts.img_size)  # for camera loss via projection

        # Need separate NMR for each fwd/bwd call.
        if opts.texture:
            self.tex_renderer = NeuralRenderer(opts.img_size)
            # Only use ambient light for tex renderer
            self.tex_renderer.ambient_light_only()

        # For visualization
        self.vis_rend = bird_vis.VisRenderer(opts.img_size,
                                             faces.data.cpu().numpy())

        # import ipdb
        # ipdb.set_trace()
        # for k,v in self.model.named_modules():
        #         v.register_backward_hook(hook)

        return
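
A minimal sketch of the faces batching prepared for rendering above, with hypothetical mesh sizes:

import torch

batch_size = 4
faces = torch.randint(0, 642, (1280, 3))                        # hypothetical F x 3 triangle indices
faces_batched = faces.view(1, -1, 3).repeat(batch_size, 1, 1)   # B x F x 3
print(faces_batched.shape)                                      # torch.Size([4, 1280, 3])
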
Example #4
    def __init__(self, opts):
        self.opts = opts

        self.symmetric = opts.symmetric
        # img_size is (256, 256)
        img_size = (opts.img_size, opts.img_size)
        print('Setting up model..')
        #----------------- Current guess: it is somewhere after this line that the mean mesh becomes the learned mesh
        #        print(opts.nz_feat)
        #        exit()
        # Not yet sure where nz_feat comes from; still need to trace its origin
        # nz_feat is 200
        self.model = mesh_net.MeshNet(img_size, opts, nz_feat=opts.nz_feat)
        #----------------------------------- After this call we get 337 verts, but the original mesh has at least 600 verts, so some points may have been altered,
        # or the full mean shape may be recovered through symmetry
        self.load_network(self.model, 'pred', self.opts.num_train_epoch)
        # Switch the model from training mode to evaluation mode
        self.model.eval()

        self.model = self.model.cuda(device=self.opts.gpu_id)

        self.renderer = NeuralRenderer(opts.img_size)

        if opts.texture:  #-------------------- this is simply True
            self.tex_renderer = NeuralRenderer(opts.img_size)
            # Only use ambient light for tex renderer
            self.tex_renderer.ambient_light_only()
#-------------------------------- Here the initial mean shape is fed into training to obtain the learned mean shape
#---------------- Whether to use use_sfm_ms (off by default); this mesh is very crude and must be learned before it yields a mean shape
        if opts.use_sfm_ms:
            anno_sfm_path = osp.join(opts.cub_cache_dir, 'sfm',
                                     'anno_testval.mat')
            anno_sfm = sio.loadmat(anno_sfm_path,
                                   struct_as_record=False,
                                   squeeze_me=True)
            sfm_mean_shape = torch.Tensor(np.transpose(
                anno_sfm['S'])).cuda(device=opts.gpu_id)
            self.sfm_mean_shape = Variable(sfm_mean_shape, requires_grad=False)
            self.sfm_mean_shape = self.sfm_mean_shape.unsqueeze(0).repeat(
                opts.batch_size, 1, 1)
            sfm_face = torch.LongTensor(anno_sfm['conv_tri'] -
                                        1).cuda(device=opts.gpu_id)
            self.sfm_face = Variable(sfm_face, requires_grad=False)
            faces = self.sfm_face.view(1, -1, 3)
#-------------------------------------------
        else:
            # For visualization
            faces = self.model.faces.view(1, -1, 3)

        self.faces = faces.repeat(opts.batch_size, 1, 1)
        #-------------------------------------- This goes into VisRenderer's __init__()
        self.vis_rend = bird_vis.VisRenderer(opts.img_size,
                                             faces.data.cpu().numpy())
        self.vis_rend.set_bgcolor([1., 1., 1.])
        self.resnet_transform = torchvision.transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
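
A hedged sketch of loading the SfM template annotation as in the examples above; the cache path is an assumption, and the keys follow the snippets:

import numpy as np
import scipy.io as sio

# Hypothetical cache path; the real one comes from opts.cub_cache_dir.
anno_sfm = sio.loadmat('cachedir/cub/sfm/anno_train.mat',
                       struct_as_record=False, squeeze_me=True)
sfm_verts = np.transpose(anno_sfm['S'])   # N x 3 template vertices
sfm_faces = anno_sfm['conv_tri'] - 1      # shift MATLAB 1-based indices to 0-based
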
Example #5
    def define_model(self):
        opts = self.opts

        # ----------
        # Options
        # ----------
        # whether the shape is symmetric
        self.symmetric = opts.symmetric
        #-------------- parker: not sure what this is for
        #-------------- anno_sfm_path is the path to the annotation file
        anno_sfm_path = osp.join(opts.cub_cache_dir, 'sfm', 'anno_train.mat')
        #---- anno_train.mat is the most basic bird template, with only 15 points
        anno_sfm = sio.loadmat(anno_sfm_path,
                               struct_as_record=False,
                               squeeze_me=True)
        #---------- anno_sfm['S'] holds the 15 vertices; anno_sfm['conv_tri'] holds the faces formed from those 15 points
        sfm_mean_shape = (np.transpose(anno_sfm['S']),
                          anno_sfm['conv_tri'] - 1)
        #--------------
        img_size = (opts.img_size, opts.img_size)
        # This goes into MeshNet in mesh_net.py
        self.model = mesh_net.MeshNet(img_size,
                                      opts,
                                      nz_feat=opts.nz_feat,
                                      num_kps=opts.num_kps,
                                      sfm_mean_shape=sfm_mean_shape)
        #----- Run this line if there are already pretrained epochs
        if opts.num_pretrain_epochs > 0:
            self.load_network(self.model, 'pred', opts.num_pretrain_epochs)

        self.model = self.model.cuda(device=opts.gpu_id)

        # Data structures to use for triangle priors.
        #--------- Fetch the edges2verts the model has already computed
        edges2verts = self.model.edges2verts
        # B x E x 4
        edges2verts = np.tile(np.expand_dims(edges2verts, 0),
                              (opts.batch_size, 1, 1))
        self.edges2verts = Variable(
            torch.LongTensor(edges2verts).cuda(device=opts.gpu_id),
            requires_grad=False)
        # For rendering.
        faces = self.model.faces.view(1, -1, 3)
        self.faces = faces.repeat(opts.batch_size, 1, 1)
        # Import NMR and name it NeuralRenderer
        self.renderer = NeuralRenderer(opts.img_size)
        self.renderer_predcam = NeuralRenderer(
            opts.img_size)  # for camera loss via projection

        # If texture is computed, the lines below run, though I do not understand why they are needed
        # Need separate NMR for each fwd/bwd call.
        if opts.texture:
            self.tex_renderer = NeuralRenderer(opts.img_size)
            # Only use ambient light for tex renderer
            self.tex_renderer.ambient_light_only()

        # For visualization
        self.vis_rend = bird_vis.VisRenderer(opts.img_size,
                                             faces.data.cpu().numpy())

        # import ipdb
        # ipdb.set_trace()
        # for k,v in self.model.named_modules():
        #         v.register_backward_hook(hook)

        return
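
Note that torch.autograd.Variable is a no-op wrapper in current PyTorch; a minimal sketch of the equivalent modern form for the batched buffers above, with hypothetical sizes:

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Plain tensors default to requires_grad=False, so no Variable wrapper is needed.
edges2verts = torch.randint(0, 642, (4, 1920, 4), dtype=torch.long).to(device)
faces = torch.randint(0, 642, (1, 1280, 3)).to(device).repeat(4, 1, 1)
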