Example #1
0
def getLoader(db):
    """Return the dataset loader instance matching the dataset key *db*.

    Parameters
    ----------
    db : str
        One of 'syn', 'human36', 'cad120', 'biwi', 'biwiid'.

    Returns
    -------
    A ``dataloader`` loader object for the requested dataset.

    Raises
    ------
    ValueError
        If *db* is not a recognized dataset key.  (The original code fell
        through with ``loader`` unbound, raising a confusing
        UnboundLocalError instead.)
    """
    if db == 'syn':
        # NOTE(review): f_test is a module-level name not visible in this
        # chunk — confirm it is defined where this function lives.
        loader = dataloader.TestLoader(f_test)
    elif db == 'human36':
        loader = dataloader.Human36Loader()
    elif db == 'cad120':
        loader = dataloader.Cad120Loader()
    elif db == 'biwi':
        loader = dataloader.BIWILoader()
    elif db == 'biwiid':
        loader = dataloader.BIWIIDLoader()
    else:
        raise ValueError(f"unknown dataset '{db}'")
    return loader
Example #2
0
def testBIWI(model,modelin=args.model,outfile=args.out,feature_transform=args.feat_trans):
    """Evaluate `model` on the BIWI dataset and save per-subject errors.

    For every BIWI subject, runs the network on homogeneous 2D landmarks,
    reads off a focal-length prediction, estimates pose via EPnP against
    the (z-flipped) mean 3DMM landmark shape, and accumulates 2D
    reprojection, 3D reconstruction, relative-3D, and relative-focal
    errors.  Results are written to *outfile* as a MATLAB .mat file.

    Parameters
    ----------
    model : torch.nn.Module
        Network whose output rows are [199 shape alphas, focal length].
    modelin : str
        Optional checkpoint path; loaded into `model` when non-empty.
    outfile : str
        Destination path for the scipy .mat results file.
    feature_transform : bool
        Unused in this function; kept for interface compatibility.
    """
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.eval()

    # load 3dmm data: mean landmark shape and its eigenvectors
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    # BUGFIX: clone before flipping z.  The original did `shape = mu_lm`,
    # which aliases the mean shape, so the in-place sign flip below
    # silently mutated mu_lm as well.
    shape = mu_lm.clone()
    shape[:,2] = shape[:,2] * -1

    loader = dataloader.BIWILoader()
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    for sub in range(len(loader)):
        batch = loader[sub]

        x_cam_gt = batch['x_cam_gt']
        x_w_gt = batch['x_w_gt']
        f_gt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]   # number of frames for this subject

        # homogeneous image coordinates: append a row of ones per frame
        one  = torch.ones(M,1,68)
        x_img_one = torch.cat([x_img,one],dim=1)

        # run the model; out rows are [alphas(199), f]
        out, trans, transfeat = model(x_img_one)
        alphas = out[:,:199].mean(0)            # averaged over frames (unused below)
        f = torch.relu(out[:,199]).mean()       # non-negative focal length, frame-averaged
        # intrinsic matrix with principal point at (320, 240)
        K = torch.zeros((3,3)).float()
        K[0,0] = f
        K[1,1] = f
        K[2,2] = 1
        K[0,2] = 320
        K[1,2] = 240
        Xc,R,T = util.EPnP(x_img,shape,K)

        # evaluate pose against ground truth
        reproj_errors2 = util.getReprojError2(x_img,shape,R,T,K)
        reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        reconstruction_error = reproj_errors3.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(f_gt - f) / f_gt    # relative focal-length error

        seterror_2d.append(reproj_error.cpu().data.item())
        seterror_3d.append(reconstruction_error.cpu().data.item())
        seterror_rel3d.append(rel_error.cpu().data.item())
        seterror_relf.append(f_error.cpu().data.item())

        print(f"fgt: {f_gt.mean().item():.3f}  | f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    # write all per-subject error arrays to a .mat file
    matdata = {}
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #3
0
def testBIWI(modelin=args.model,outfile=args.out,optimize=args.opt):
    """Evaluate the two-network (calibration + SfM) pipeline on BIWI.

    Predicts focal length with ``calib_net`` and 3DMM shape coefficients
    with ``sfm_net``; when *optimize* is true, refines both networks
    per-subject with an alternating optimization scheme.  Reports 2D
    reprojection, relative 3D, and relative focal-length errors and
    saves per-subject predictions and errors to *outfile* (.mat).

    Parameters
    ----------
    modelin : str
        Base checkpoint name; weights are loaded from
        'model/calib_<modelin>' and 'model/sfm_<modelin>' when non-empty.
    outfile : str
        Destination path for the scipy .mat results file.
    optimize : bool
        When true, fine-tune both networks on each test subject.
    """
    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)     # single output: focal length
    sfm_net = CalibrationNet3(n=199)     # 199 3DMM shape coefficients
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    # flip z-axis of the mean shape to match this pipeline's camera convention
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    # pre-scale eigenvectors by their standard deviations so predicted
    # betas act in units of std-dev
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # define loader
    loader = dataloader.BIWILoader()
    f_pred = []
    shape_pred = []
    error_2d = []
    error_relf = []
    error_rel3d = []
    for sub in range(len(loader)):
        batch = loader[sub]
        x_cam_gt = batch['x_cam_gt']
        fgt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]   # number of frames for this subject
        N = 68                  # number of facial landmarks


        # (M, 2, N) image points, and the 4-D network input layout
        ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
        x = ptsI.unsqueeze(0).permute(0,2,1,3)

        # run the model
        # NOTE(review): the +300 looks like a focal-length offset/floor — confirm
        f = calib_net(x) + 300
        betas = sfm_net(x)
        betas = betas.squeeze(0).unsqueeze(-1)
        # shape = mean + eigenvector basis weighted by predicted betas
        shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)

        # additional optimization on initial solution
        if optimize:
            # NOTE(review): calib_path/sfm_path are only bound when
            # modelin != "" — this branch raises NameError otherwise.
            # reset to the checkpoint before per-subject fine-tuning
            calib_net.load_state_dict(torch.load(calib_path))
            sfm_net.load_state_dict(torch.load(sfm_path))
            calib_net.eval()
            sfm_net.eval()
            # trainfc presumably unfreezes the fully-connected layers — confirm
            trainfc(calib_net)
            trainfc(sfm_net)

            opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
            opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-5)
            curloss = 100

            # alternate between calibration and shape refinement until the
            # dual objective stops improving
            for outerloop in itertools.count():

                # camera calibration: optimize f with shape held fixed
                shape = shape.detach()
                for iter in itertools.count():  # NOTE(review): shadows builtin `iter`
                    opt1.zero_grad()
                    f = calib_net.forward2(x) + 300
                    K = torch.zeros(3,3).float()
                    K[0,0] = f
                    K[1,1] = f
                    K[2,2] = 1

                    f_error = torch.mean(torch.abs(f - fgt))
                    #rmse = torch.norm(shape_gt - shape,dim=1).mean()

                    # differentiable PnP pose estimation
                    km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
                    Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                    error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                    error_time = util.getTimeConsistency(shape,R,T)

                    # reprojection loss plus a temporal-consistency regularizer
                    loss = error2d.mean() + 0.01*error_time
                    # NOTE(review): the `iter == 5` break always fires before
                    # the `iter > 10` divergence guard can — that guard (and
                    # prev_loss) is effectively dead code.
                    if iter == 5: break
                    if iter > 10 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt1.step()
                    print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} ")

                # sfm: optimize shape with f held fixed
                f = f.detach()
                for iter in itertools.count():
                    opt2.zero_grad()

                    # shape prediction from current sfm_net betas
                    betas = sfm_net.forward2(x)
                    shape = torch.sum(betas * lm_eigenvec,1)
                    shape = shape.reshape(68,3) + mu_lm
                    # center the shape at its centroid
                    shape = shape - shape.mean(0).unsqueeze(0)
                    K = torch.zeros((3,3)).float()
                    K[0,0] = f
                    K[1,1] = f
                    K[2,2] = 1

                    #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()
                    #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()

                    # differentiable PnP pose estimation
                    km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                    Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                    error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                    error_time = util.getTimeConsistency(shape,R,T)

                    loss = error2d.mean() + 0.01*error_time
                    # NOTE(review): same dead divergence guard as above.
                    if iter == 5: break
                    if iter > 10 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt2.step()
                    print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} ")

                # closing condition for outerloop on dual objective
                if torch.abs(curloss - loss) < 0.01: break
                curloss = loss
        else:
            # no refinement: single EPnP + Gauss-Newton pass on the
            # network's initial f and shape
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

        # get errors against ground truth
        reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(fgt - f) / fgt  # relative focal-length error

        # save final prediction
        f_pred.append(f.detach().cpu().item())
        shape_pred.append(shape.detach().cpu().numpy())

        error_2d.append(reproj_error.cpu().data.item())
        error_rel3d.append(rel_error.cpu().data.item())
        error_relf.append(f_error.cpu().data.item())

        print(f" f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    # prepare output file
    out_shape = np.stack(shape_pred)
    out_f = np.stack(f_pred)

    matdata = {}
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    matdata['error_2d'] = np.array(error_2d)
    matdata['error_rel3d'] = np.array(error_rel3d)
    matdata['error_relf'] = np.array(error_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(error_2d)}")
    print(f"MEAN seterror_rel3d: {np.mean(error_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(error_relf)}")
Example #4
0
def testBIWI(model,modelin=args.model,outfile=args.out,feature_transform=args.feat_trans):
    """Evaluate `model` on BIWI with a manual EPnP (N=1 and N=2 cases), on GPU.

    Runs the model on homogeneous 2D landmarks to get 3DMM alphas and a
    focal length, reconstructs the landmark shape from the eigenbasis,
    then solves pose with EPnP for both the N=1 and N=2 control-point
    cases and keeps, per frame, whichever has the lower 2D reprojection
    error.  Per-subject mean errors are saved to *outfile* (.mat).

    Parameters
    ----------
    model : torch.nn.Module
        Network mapping homogeneous 2D landmarks to [alphas(199), f].
    modelin : str
        Optional checkpoint path; loaded when non-empty.
    outfile : str
        Destination path for the scipy .mat results file.
    feature_transform : bool
        Unused in this function; kept for interface compatibility.
    """
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.cuda()

    # load 3dmm data: mean landmark shape and its eigenvectors
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().cuda()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().cuda()

    loader = dataloader.BIWILoader()
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    for sub in range(len(loader)):
        batch = loader[sub]

        x_cam_gt = batch['x_cam_gt'].cuda()
        x_w_gt = batch['x_w_gt'].cuda()
        f_gt = batch['f_gt'].cuda()
        x_img = batch['x_img'].cuda()
        x_img_gt = batch['x_img_gt'].cuda()
        M = x_img_gt.shape[0]   # number of frames for this subject

        # homogeneous image coordinates: append a row of ones per frame
        one  = torch.ones(M,1,68).cuda()
        x_img_one = torch.cat([x_img,one],dim=1)

        # run the model; out rows are [alphas(199), f]
        out, trans, transfeat = model(x_img_one)
        alphas = out[:,:199].mean(0)        # frame-averaged shape coefficients
        f = torch.relu(out[:,199]).mean()   # non-negative focal length
        # intrinsic matrix with principal point at (320, 240)
        K = torch.zeros((3,3)).float().cuda()
        K[0,0] = f
        K[1,1] = f
        K[2,2] = 1
        K[0,2] = 320
        K[1,2] = 240

        # apply 3DMM model from predicted parameters
        alpha_matrix = torch.diag(alphas)
        shape_cov = torch.mm(lm_eigenvec,alpha_matrix)
        s = shape_cov.sum(1).view(68,3)
        shape = (mu_lm + s)
        # BUGFIX: original read `shae[:,2] = ...` — a typo that raised
        # NameError at runtime; flip the z-axis of `shape`.
        shape[:,2] = shape[:,2]*-1

        # run epnp algorithm
        # get control points
        c_w = util.getControlPoints(shape)

        # solve alphas (barycentric coordinates w.r.t. control points)
        alphas = util.solveAlphas(shape,c_w)

        # setup M with the principal point (px, py)
        px = 320
        py = 240

        Matrix = util.setupM(alphas,x_img.permute(0,2,1),px,py,f)

        # get eigenvectors of M for each view
        u,d,v = torch.svd(Matrix)

        # solve the N=1 case (single null-space vector)
        c_c_n1 = v[:,:,-1].reshape((M,4,3)).permute(0,2,1)
        _ , x_c_n1, _ = util.scaleControlPoints(c_c_n1,c_w[:3,:],alphas,shape)
        Rn1,Tn1 = util.getExtrinsics(x_c_n1,shape)
        reproj_error2_n1 = util.getReprojError2(x_img,shape,Rn1,Tn1,K)
        reproj_error3_n1 = util.getReprojError3(x_cam_gt,shape,Rn1,Tn1)
        rel_error_n1 = util.getRelReprojError3(x_cam_gt,shape,Rn1,Tn1)

        # solve the N=2 case (two null-space vectors)
        # get distance constraints between control points
        d12,d13,d14,d23,d24,d34 = util.getDistances(c_w)
        distances = torch.stack([d12,d13,d14,d23,d24,d34])**2
        beta_n2 = util.getBetaN2(v[:,:,-2:],distances)
        c_c_n2 = util.getControlPointsN2(v[:,:,-2:],beta_n2)
        _,x_c_n2,_ = util.scaleControlPoints(c_c_n2,c_w[:3,:],alphas,shape)
        Rn2,Tn2 = util.getExtrinsics(x_c_n2,shape)
        reproj_error2_n2 = util.getReprojError2(x_img,shape,Rn2,Tn2,K)
        reproj_error3_n2 = util.getReprojError3(x_cam_gt,shape,Rn2,Tn2)
        # BUGFIX: original computed this from Rn1,Tn1 — the N=2 relative
        # error must use the N=2 pose.
        rel_error_n2 = util.getRelReprojError3(x_cam_gt,shape,Rn2,Tn2)

        # per-frame: pick whichever of N=1/N=2 reprojects better
        mask = reproj_error2_n1 < reproj_error2_n2

        reproj_errors = torch.cat((reproj_error2_n1[mask],reproj_error2_n2[~mask]))
        rmse_errors = torch.cat((reproj_error3_n1[mask],reproj_error3_n2[~mask]))
        rel_errors = torch.cat((rel_error_n2[~mask],rel_error_n1[mask]))

        # errors
        reproj_error = torch.mean(reproj_errors)
        reconstruction_error = torch.mean(rmse_errors)
        rel_error = torch.mean(rel_errors)
        f_error = torch.abs(f_gt - f) / f_gt    # relative focal-length error

        seterror_2d.append(reproj_error.cpu().data.item())
        seterror_3d.append(reconstruction_error.cpu().data.item())
        seterror_rel3d.append(rel_error.cpu().data.item())
        seterror_relf.append(f_error.cpu().data.item())

        print(f"fgt: {f_gt.mean().item():.3f}  | f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    # write all per-subject error arrays to a .mat file
    matdata = {}
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")