Example #1
0
    def __init__(self, **kvs):
        """Set up the full 3DDFA_v2 pipeline: BFM, CNN weights, input transform.

        All settings arrive as keyword arguments; the ``kvs.get`` calls below
        list the recognized keys and their defaults.
        """
        # Inference only — gradients are never required.
        torch.set_grad_enabled(False)

        # Load the Basel Face Model used to decode predicted parameters.
        bfm_fp = kvs.get('bfm_fp', make_abs_path('configs/bfm_noneck_v3.pkl'))
        self.bfm = BFMModel(
            bfm_fp=bfm_fp,
            shape_dim=kvs.get('shape_dim', 40),
            exp_dim=kvs.get('exp_dim', 10),
        )
        self.tri = self.bfm.tri

        # Runtime configuration.
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)
        if self.gpu_mode:
            print(f"Loading 3DDFA_v2 to GPU (id={self.gpu_id})...")

        # Normalization statistics file matching the input resolution.
        default_stats_fp = make_abs_path(
            f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')
        param_mean_std_fp = kvs.get('param_mean_std_fp', default_stats_fp)

        # Build the backbone; the default 62-dim output is
        # 12 (pose) + 40 (shape) + 10 (expression).
        arch_cls = getattr(models, kvs.get('arch'))
        net = arch_cls(
            num_classes=kvs.get('num_params', 62),
            widen_factor=kvs.get('widen_factor', 1),
            size=self.size,
            mode=kvs.get('mode', 'small'),
        )
        net = load_model(net, kvs.get('checkpoint_fp'))

        if self.gpu_mode:
            print("Loading 3DDFA_v2 model to GPU...")
            cudnn.benchmark = True
            net = net.cuda(device=self.gpu_id)

        self.model = net
        self.model.eval()  # freeze BN statistics for inference

        # Input preprocessing: to tensor, then normalize with (x - 127.5) / 128.
        self.transform = Compose([
            ToTensorGjz(),
            NormalizeGjz(mean=127.5, std=128),
        ])

        # Statistics used to de-normalize the raw parameter vector.
        stats = _load(param_mean_std_fp)
        self.param_mean = stats.get('mean')
        self.param_std = stats.get('std')
Example #2
0
    def __init__(self, **kvs):
        """Set up the 3DDFA pipeline: CNN weights and input normalization.

        All settings arrive as keyword arguments; the ``kvs.get`` calls below
        list the recognized keys and their defaults.
        """
        # Inference only — gradients are never required.
        torch.set_grad_enabled(False)

        # Runtime configuration.
        self.gpu_mode = kvs.get('gpu_mode', False)
        self.gpu_id = kvs.get('gpu_id', 0)
        self.size = kvs.get('size', 120)

        # Normalization statistics file matching the input resolution.
        default_stats_fp = make_abs_path(
            f'configs/param_mean_std_62d_{self.size}x{self.size}.pkl')
        param_mean_std_fp = kvs.get('param_mean_std_fp', default_stats_fp)

        # Build the backbone; 62 = 12 (pose) + 40 (shape) + 10 (expression).
        arch_cls = getattr(models, kvs.get('arch'))
        net = arch_cls(
            num_classes=kvs.get('num_params', 62),
            widen_factor=kvs.get('widen_factor', 1),
            size=self.size,
            mode=kvs.get('mode', 'small'),
        )
        net = load_model(net, kvs.get('checkpoint_fp'))

        if self.gpu_mode:
            cudnn.benchmark = True
            net = net.cuda(device=self.gpu_id)

        self.model = net
        self.model.eval()  # freeze BN statistics for inference

        # Input preprocessing: to tensor, then normalize with (x - 127.5) / 128.
        self.transform = Compose([
            ToTensorGjz(),
            NormalizeGjz(mean=127.5, std=128),
        ])

        # Statistics used to de-normalize the raw parameter vector.
        stats = _load(param_mean_std_fp)
        self.param_mean = stats.get('mean')
        self.param_std = stats.get('std')
Example #3
0
def convert_to_onnx(**kvs):
    """Export a trained checkpoint to an ONNX file next to the checkpoint.

    Keyword args (with defaults): ``size`` (120), ``arch`` (required, name of
    a class in ``models``), ``num_params`` (62), ``widen_factor`` (1),
    ``mode`` ('small'), ``checkpoint_fp`` (required, path to the .pth file).

    Returns:
        The path of the written ``.onnx`` file.
    """
    import os

    # 1. Rebuild the network and load the trained weights.
    size = kvs.get('size', 120)
    model = getattr(models,
                    kvs.get('arch'))(num_classes=kvs.get('num_params', 62),
                                     widen_factor=kvs.get('widen_factor', 1),
                                     size=size,
                                     mode=kvs.get('mode', 'small'))
    checkpoint_fp = kvs.get('checkpoint_fp')
    model = load_model(model, checkpoint_fp)
    model.eval()  # export with inference-mode BN/dropout behavior

    # 2. Trace with a dummy batch and export.
    batch_size = 1
    dummy_input = torch.randn(batch_size, 3, size, size)
    # BUGFIX: the original `checkpoint_fp.replace('.pth', '.onnx')` swapped
    # EVERY '.pth' occurrence in the path (including in directory names), and
    # when the path had no '.pth' at all, wfp == checkpoint_fp and the export
    # silently overwrote the checkpoint. Only the trailing extension should
    # change; paths without a '.pth' suffix get '.onnx' appended instead.
    stem, ext = os.path.splitext(checkpoint_fp)
    wfp = (stem if ext == '.pth' else checkpoint_fp) + '.onnx'
    torch.onnx.export(model, (dummy_input, ),
                      wfp,
                      input_names=['input'],
                      output_names=['output'])
    print(f'Convert {checkpoint_fp} to {wfp} done.')
    return wfp