Example #1
def optimizef(xI, xW, f):

    # create optimization parameters
    varf = Variable(f, requires_grad=True)
    optimizer = torch.optim.Adam([varf], lr=1)

    # initialize initial values
    minerror = 10000
    convergence = 0
    for iter in itertools.count():

        # create K and inverse of K
        K = torch.zeros((3, 3))
        K[0, 0] = varf
        K[1, 1] = varf
        K[2, 2] = 1

        kinv = torch.zeros(3, 3).float()
        kinv[0, 0] = 1 / varf
        kinv[1, 1] = 1 / varf
        kinv[2, 2] = 1

        # get error
        #reproj_errors2 = util.getReprojError2(sequence,shape,R,T,K)
        #reproj_errors3 = util.getReprojError3(x_cam_gt,shape,varR,varT)
        #error_3d = util.getRelReprojError3(x_cam_gt,shape,R,T).mean()
        #error_Rconsistency = util.getRConsistency(R)
        #error_Tconsistency = util.getTConsistency(T)*0.001
        #error_3dconsistency = util.get3DConsistency(sequence,shape,kinv,R,T)

        Xc, R, T = util.EPnP(xI, xW, K)
        optimizer.zero_grad()

        error_3dconsistency = util.get3DConsistency(xI, xW, kinv, R, T)
        loss = error_3dconsistency

        # determine optimization convergence
        if loss < minerror:
            minerror = loss
            optf = varf.item()
            optR = R
            optT = T
            convergence = 0
        else:
            convergence += 1

        # assumed patience: stop once the loss has not improved for 100 iterations,
        # so the return statement below is actually reachable
        if convergence > 100:
            break

        loss.backward()
        optimizer.step()
        delta = K[0, 0] - varf
        direction = torch.sign(delta)

        print(
            f"iter: {iter} | loss: {loss.item():.3f} | f: {f.item():.3f} |  error 3d: {error_3dconsistency.item():.3f} | delta: {delta.item():.3f}"
        )

    return optf, optR, optT
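Example #1 tunes a single focal-length parameter with Adam by building the intrinsic matrix K from it and minimizing a differentiable consistency loss. Below is a minimal self-contained sketch of that pattern; the function name optimize_focal_toy, the toy 3D/2D points, and the least-squares pinhole reprojection loss are illustrative assumptions standing in for util.EPnP and util.get3DConsistency.

import torch

def optimize_focal_toy(x2d, x3d, f_init, iters=500):
    # x2d: (N, 2) observed image points, x3d: (N, 3) camera-frame points (toy stand-ins)
    f = torch.tensor(float(f_init), requires_grad=True)
    optimizer = torch.optim.Adam([f], lr=1.0)
    for _ in range(iters):
        optimizer.zero_grad()
        K = torch.zeros(3, 3)
        K[0, 0] = f          # building K in place keeps the graph connected to f, as in the example
        K[1, 1] = f
        K[2, 2] = 1.0
        proj = (K @ x3d.T).T
        proj = proj[:, :2] / proj[:, 2:3]          # perspective divide
        loss = torch.mean((proj - x2d) ** 2)       # toy reprojection loss
        loss.backward()
        optimizer.step()
    return f.detach()

# toy data generated with a "ground-truth" focal length of 800; the estimate moves toward it
x3d = torch.rand(68, 3) + torch.tensor([0.0, 0.0, 5.0])
x2d = 800.0 * x3d[:, :2] / x3d[:, 2:3]
print(optimize_focal_toy(x2d, x3d, f_init=500.0))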
Example #2
def train(modelin=args.model, modelout=args.out):
    # define logger
    #torch.manual_seed(6)
    #if log:
    #    logger = Logger(logname)
    # define model, dataloader, 3dmm eigenvectors, optimization method
    torch.manual_seed(2)
    calib_net = Model1(k=1, feature_transform=False)
    sfm_net = Model1(k=199, feature_transform=False)
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    opt1 = torch.optim.Adam(calib_net.parameters(), lr=1e-1)
    opt2 = torch.optim.Adam(sfm_net.parameters(), lr=1e-1)

    # dataloader
    #data = dataloader.Data()
    #loader = data.batchloader
    #loader = dataloader.BIWILoader()
    loader = dataloader.SyntheticLoader()

    # mean shape and eigenvectors for 3dmm
    mu_lm = torch.from_numpy(loader.mu_lm).float()
    #mu_lm[:,2] = mu_lm[:,2] * -1
    shape = mu_lm
    lm_eigenvec = torch.from_numpy(loader.lm_eigenvec).float()

    # main training loop
    for epoch in itertools.count():
        for j, data in enumerate(loader):

            M = loader.M
            N = loader.N

            # get the input and gt values
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']

            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            one = torch.ones(M * N, 1)
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3)
            x = x_img_one.permute(1, 0)

            # get initial values for betas and alphas of EPNP
            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            fvals = []
            errors = []
            for outerloop in itertools.count():

                # calibration
                shape = shape.detach()
                for iter in itertools.count():
                    opt1.zero_grad()

                    # focal length prediction
                    f, _, _ = calib_net(x.unsqueeze(0))
                    f = f + 300
                    K = torch.zeros((3, 3)).float()
                    K[0, 0] = f
                    K[1, 1] = f
                    K[2, 2] = 1

                    # RMSE between GT and predicted shape
                    rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()

                    # error f
                    error_f = torch.mean(torch.abs(f - fgt))

                    # differentiable PnP pose estimation
                    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                     alphas, shape, ptsI, K)
                    error2d = util.getReprojError2(ptsI,
                                                   shape,
                                                   R,
                                                   T,
                                                   K,
                                                   show=False,
                                                   loss='l1')
                    loss = error2d.mean() + error_f
                    if iter > 20 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt1.step()
                    print(
                        f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}"
                    )

                # sfm
                f = f.detach()
                for iter in itertools.count():
                    opt2.zero_grad()

                    # shape prediction
                    betas, _, _ = sfm_net(x.unsqueeze(0))
                    shape = torch.sum(betas * lm_eigenvec, 1)
                    shape = shape.reshape(68, 3) + mu_lm
                    K = torch.zeros((3, 3)).float()
                    K[0, 0] = f
                    K[1, 1] = f
                    K[2, 2] = 1

                    # RMSE between GT and predicted shape
                    rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()

                    # differentiable PnP pose estimation
                    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                     alphas, shape, ptsI, K)
                    error2d = util.getReprojError2(ptsI,
                                                   shape,
                                                   R,
                                                   T,
                                                   K,
                                                   show=False,
                                                   loss='l2')
                    loss = error2d.mean()
                    if iter > 20 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt2.step()
                    print(
                        f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}"
                    )

                if outerloop == 2: break

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K)
            reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T)
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            print(
                f"f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )
            #end for

        torch.save(sfm_net.state_dict(),
                   os.path.join('model', 'sfm_' + modelout))
        torch.save(calib_net.state_dict(),
                   os.path.join('model', 'calib_' + modelout))
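Example #2 (and the test routines that follow) alternates between a calibration pass that refines the focal length while the shape is held fixed and a structure pass that refines the shape while the focal length is held fixed, each inner loop stopping once its loss no longer improves. The sketch below shows only that control flow; the two torch.nn.Linear "networks", the coupled quadratic objective, the iteration caps, and the patience of 20 steps are assumptions for illustration, not the repository's Model1 code.

import itertools
import torch

torch.manual_seed(0)

# toy stand-ins for calib_net (focal length) and sfm_net (shape)
calib = torch.nn.Linear(1, 1)
sfm = torch.nn.Linear(1, 1)
opt1 = torch.optim.Adam(calib.parameters(), lr=1e-1)
opt2 = torch.optim.Adam(sfm.parameters(), lr=1e-1)

x = torch.ones(1, 1)

def joint_loss(f, s):
    # stand-in for the reprojection error that couples focal length and shape
    return (f - 4.0) ** 2 + (s + 2.0) ** 2 + 0.1 * (f * s + 8.0) ** 2

for outerloop in itertools.count():
    # calibration: optimize f with the structure prediction held fixed
    s_fixed = sfm(x).detach().squeeze()
    prev_loss = float("inf")
    for it in itertools.count():
        opt1.zero_grad()
        loss = joint_loss(calib(x).squeeze(), s_fixed)
        if it > 200: break                          # safety cap for the sketch
        if it > 20 and prev_loss < loss.item():     # stop once the loss no longer improves
            break
        prev_loss = loss.item()
        loss.backward()
        opt1.step()

    # structure: optimize the shape with the focal length held fixed
    f_fixed = calib(x).detach().squeeze()
    prev_loss = float("inf")
    for it in itertools.count():
        opt2.zero_grad()
        loss = joint_loss(f_fixed, sfm(x).squeeze())
        if it > 200: break
        if it > 20 and prev_loss < loss.item():
            break
        prev_loss = loss.item()
        loss.backward()
        opt2.step()

    if outerloop == 2:   # fixed number of outer alternations, as in the example
        break

print(f"f: {calib(x).item():.2f} | s: {sfm(x).item():.2f}")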
Example #3
def test(modelin=args.model,outfile=args.out,optimize=args.opt):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)
    sfm_net = CalibrationNet3(n=199)
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_fpred = []
    all_depth = []
    out_shape = []
    out_f = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i*100 for i in range(4,15)]
    for f_test in f_vals:
        # create dataloader
        #f_test = 1000
        loader = dataloader.TestLoader(f_test)

        f_pred = []
        shape_pred = []
        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j,data in enumerate(loader):
            if j == 10: break
            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']

            all_depth.append(np.mean(T_gt[:,2]))
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
            x = ptsI.unsqueeze(0).permute(0,2,1,3)

            # run the model
            f = calib_net(x) + 300
            betas = sfm_net(x)
            betas = betas.squeeze(0).unsqueeze(-1)
            shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)

            # additional optimization on initial solution
            if optimize:
                calib_net.load_state_dict(torch.load(calib_path))
                sfm_net.load_state_dict(torch.load(sfm_path))
                calib_net.eval()
                sfm_net.eval()
                trainfc(calib_net)
                trainfc(sfm_net)
                opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
                opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-2)
                curloss = 100
                for outerloop in itertools.count():

                    # camera calibration
                    shape = shape.detach()
                    for iter in itertools.count():
                        opt1.zero_grad()
                        f = calib_net.forward2(x) + 300
                        K = torch.zeros(3,3).float()
                        K[0,0] = f
                        K[1,1] = f
                        K[2,2] = 1

                        f_error = torch.mean(torch.abs(f - fgt))
                        rmse = torch.norm(shape_gt - shape,dim=1).mean()

                        # differentiable PnP pose estimation
                        km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
                        Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                        error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                        #error2d = util.getReprojError2_(ptsI,Xc,K,show=True,loss='l2')
                        error_time = util.getTimeConsistency(shape,R,T)

                        loss = error2d.mean() + 0.01*error_time
                        if iter == 5: break
                        loss.backward()
                        opt1.step()
                        print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} ")

                    # sfm
                    f = f.detach()
                    for iter in itertools.count():
                        opt2.zero_grad()

                        # shape prediction
                        betas = sfm_net.forward2(x)
                        shape = torch.sum(betas * lm_eigenvec,1)
                        shape = shape.reshape(68,3) + mu_lm
                        shape = shape - shape.mean(0).unsqueeze(0)
                        K = torch.zeros((3,3)).float()
                        K[0,0] = f
                        K[1,1] = f
                        K[2,2] = 1

                        #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()
                        rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()

                        # differentiable PnP pose estimation
                        km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                        Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                        error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                        error_time = util.getTimeConsistency(shape,R,T)

                        loss = error2d.mean() + 0.01*error_time
                        if iter == 5: break
                        if iter > 10 and prev_loss < loss:
                            break
                        else:
                            prev_loss = loss
                        loss.backward()
                        opt2.step()
                        print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} ")

                    # closing condition for outerloop on dual objective
                    if torch.abs(curloss - loss) < 0.01: break
                    curloss = loss
            else:
                K = torch.zeros(3,3).float()
                K[0,0] = f
                K[1,1] = f
                K[2,2] = 1
                km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

            all_fpred.append(f.detach().numpy()[0])

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K,show=False)
            reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean()
            rel_errors =  util.getRelReprojError3(x_cam_gt,shape,R,T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            # save final prediction
            f_pred.append(f.detach().cpu().item())
            shape_pred.append(shape.detach().cpu().numpy())

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())
            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(f"f/sequence: {f_test}/{j}  | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        out_f.append(np.stack(f_pred))
        out_shape.append(np.stack(shape_pred,axis=0))
        print(f"f_error_rel: {avg_relf:.4f}  | rel rmse: {avg_rel3d:.4f}    | 2d error: {reproj_error.item():.4f} |  rmse: {avg_3d:.4f}  |")

    out_shape = np.stack(out_shape)
    out_f = np.stack(out_f)
    all_f = np.stack(all_f).flatten()
    all_fpred = np.stack(all_fpred).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_fpred'] = np.array(all_fpred)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #4
def train(modelin=args.model,
          modelout=args.out,
          log=args.log,
          logname=args.logname):
    # define logger
    #torch.manual_seed(6)

    # define model, dataloader, 3dmm eigenvectors, optimization method
    torch.manual_seed(2)
    model = Model1(k=199, feature_transform=False)
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
    decay = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # dataloader
    #data = dataloader.Data()
    #loader = data.batchloader
    loader = dataloader.SyntheticLoader()

    # mean shape and eigenvectors for 3dmm
    mu_lm = torch.from_numpy(loader.mu_lm).float().cuda()
    #mu_lm[:,2] = mu_lm[:,2] * -1
    #shape = mu_lm.detach().cuda()
    lm_eigenvec = torch.from_numpy(loader.lm_eigenvec).float().cuda()

    M = loader.M
    N = loader.N

    # main training loop
    for epoch in itertools.count():
        #for j,batch in enumerate(loader):

        np.random.seed(2)
        for j, data in enumerate(loader):

            # get the input and gt values
            x_cam_gt = data['x_cam_gt'].cuda()
            x_w_gt = data['x_w_gt'].cuda()
            fgt = data['f_gt'].cuda()
            beta_gt = data['beta_gt'].cuda()
            x_img = data['x_img'].cuda()
            #x_img_norm = data['x_img_norm']
            x_img_gt = data['x_img_gt'].cuda()

            #batch_size = fgt.shape[0]
            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            one = torch.ones(M * N, 1).cuda()
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(6800, 3)

            # run the model
            x = x_img_one.permute(1, 0)

            # get initial values for betas and alphas of EPNP
            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            v = pptk.viewer([0, 0, 0])
            v.set(point_size=1)
            # optimize using EPNP+GN
            for iter in itertools.count():
                optimizer.zero_grad()

                # model output
                betas, _, _ = model(x.unsqueeze(0))
                shape = torch.sum(betas * lm_eigenvec, 1)
                shape = shape.reshape(68, 3) + mu_lm

                K = torch.zeros((3, 3)).float().cuda()
                K[0, 0] = 400
                K[1, 1] = 400
                K[2, 2] = 1

                # differentiable pose estimation
                km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                              shape, ptsI, K)
                loss = util.getReprojError2(ptsI, shape, R, T, K).mean()
                loss.backward()
                optimizer.step()

                #visualize shape
                v.clear()
                pts = shape.detach().cpu().numpy()
                v.load(pts)
                v.set(r=300)

                print(
                    f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {fgt[0].item():.1f}/{fgt[0].item():.1f}"
                )
                # assumed iteration cap so the epoch loop and the model save below are reachable
                if iter == 200: break

        # save model and increment weight decay
        print("saving!")
        torch.save(model.state_dict(), modelout)
        decay.step()
Example #5
def testBIWI(model,modelin=args.model,outfile=args.out,feature_transform=args.feat_trans):
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.eval()

    # load 3dmm data
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    shape = mu_lm
    shape[:,2] = shape[:,2] * -1

    loader = dataloader.BIWILoader()
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    for sub in range(len(loader)):
        batch = loader[sub]

        x_cam_gt = batch['x_cam_gt']
        x_w_gt = batch['x_w_gt']
        f_gt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]

        one  = torch.ones(M,1,68)
        x_img_one = torch.cat([x_img,one],dim=1)

        # run the model
        out, trans, transfeat = model(x_img_one)
        alphas = out[:,:199].mean(0)
        f = torch.relu(out[:,199]).mean()
        K = torch.zeros((3,3)).float()
        K[0,0] = f
        K[1,1] = f
        K[2,2] = 1
        K[0,2] = 320
        K[1,2] = 240
        Xc,R,T = util.EPnP(x_img,shape,K)

        # apply 3DMM model from predicted parameters
        reproj_errors2 = util.getReprojError2(x_img,shape,R,T,K)
        reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        reconstruction_error = reproj_errors3.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(f_gt - f) / f_gt

        seterror_2d.append(reproj_error.cpu().data.item())
        seterror_3d.append(reconstruction_error.cpu().data.item())
        seterror_rel3d.append(rel_error.cpu().data.item())
        seterror_relf.append(f_error.cpu().data.item())

        print(f"fgt: {f_gt.mean().item():.3f}  | f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    matdata = {}
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #6
def test(modelin=args.model,outfile=args.out,optimize=args.opt,ft=args.ft):
    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = PointNet(n=1,feature_transform=ft)
    sfm_net = PointNet(n=199,feature_transform=ft)
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_fpred = []
    all_depth = []
    out_shape = []
    out_f = []
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i*100 for i in range(4,15)]

    # set random seed for reproducibility of test set
    np.random.seed(0)
    torch.manual_seed(0)
    for f_test in f_vals:
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        f_pred = []
        shape_pred = []
        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j,data in enumerate(loader):
            if j >= 10: break
            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']

            depth = torch.norm(x_cam_gt.mean(2),dim=1)
            all_depth.append(depth.numpy())
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
            x = x_img.unsqueeze(0).permute(0,2,1)

            # run the model
            f = calib_net(x) + 300
            betas = sfm_net(x)
            betas = betas.squeeze(0).unsqueeze(-1)
            shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)
            shape = shape - shape.mean(0).unsqueeze(0)

            # get motion measurement guess
            K = torch.zeros((3,3)).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            _, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)
            error_time = util.getTimeConsistency(shape,R,T)
            if error_time > 10:
                mode='walk'
            else:
                mode='still'

            # apply dual optimization
            if optimize:
                calib_net.load_state_dict(torch.load(calib_path))
                sfm_net.load_state_dict(torch.load(sfm_path))
                shape,K,R,T = dualoptimization(x,calib_net,sfm_net,shape_gt=shape_gt,fgt=fgt,mode=mode)
                f = K[0,0].detach()
            else:
                K = torch.zeros(3,3).float()
                K[0,0] = f
                K[1,1] = f
                K[2,2] = 1
                km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K,show=False)
            reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean()
            rel_errors =  util.getRelReprojError3(x_cam_gt,shape,R,T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            # save final prediction
            f_pred.append(f.detach().cpu().item())
            shape_pred.append(shape.detach().cpu().numpy())

            all_fpred.append(f.detach().data.numpy())
            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())
            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(f"f/sequence: {f_test}/{j}  | f/fgt: {f.item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        out_f.append(np.stack(f_pred))
        out_shape.append(np.concatenate(shape_pred,axis=0))
        print(f"f_error_rel: {avg_relf:.4f}  | rel rmse: {avg_rel3d:.4f}    | 2d error: {avg_2d:.4f} |  rmse: {avg_3d:.4f}  |")

    # save output
    out_shape = np.stack(out_shape)
    out_f = np.stack(out_f)
    all_f = np.stack(all_f).flatten()
    all_fpred = np.stack(all_fpred).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_fpred'] = np.array(all_fpred)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")

    return np.mean(seterror_relf)
Example #7
def train(modelin=args.model,
          modelout=args.out,
          log=args.log,
          logname=args.logname,
          device=args.device):
    # define logger
    if log:
        logger = Logger(logname)

    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)
    sfm_net = Model1(k=199, feature_transform=False)
    if modelin != "":
        #model_dict = model.state_dict()
        #pretrained_dict = torch.load(modelin)
        #pretrained_dict = {k: v for k,v in pretrained_dict.items() if k in model_dict}
        #model_dict.update(pretrained_dict)
        #model.load_state_dict(pretrained_dict)
        # `model` is undefined here; load the two sub-networks from the
        # 'calib_'/'sfm_' prefixed files used by the save code at the end of this function
        calib_net.load_state_dict(torch.load(os.path.join('model', 'calib_' + modelin)))
        sfm_net.load_state_dict(torch.load(os.path.join('model', 'sfm_' + modelin)))
    #calib_net.to(device=device)
    #sfm_net.to(device=device)
    opt1 = torch.optim.Adam(calib_net.parameters(), lr=1e-1)
    opt2 = torch.optim.Adam(sfm_net.parameters(), lr=1e-1)

    # dataloader
    data = dataloader.Data()
    loader = data.batchloader
    batch_size = data.batchsize

    # mean shape and eigenvectors for 3dmm
    mu_lm = torch.from_numpy(data.mu_lm).float()  #.to(device=device)
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    mu_lm = torch.stack(batch_size * [mu_lm.to(device=device)])
    shape = mu_lm
    lm_eigenvec = torch.from_numpy(data.lm_eigenvec).float().to(device=device)
    lm_eigenvec = torch.stack(batch_size * [lm_eigenvec])

    M = data.M
    N = data.N

    # main training loop
    for epoch in itertools.count():
        for j, batch in enumerate(loader):

            # get the input and gt values
            x_cam_gt = batch['x_cam_gt'].to(device=device)
            shape_gt = batch['x_w_gt'].to(device=device)
            fgt = batch['f_gt'].to(device=device)
            x_img = batch['x_img'].to(device=device)
            #beta_gt = batch['beta_gt'].to(device=device)
            #x_img_norm = batch['x_img_norm']
            #x_img_gt = batch['x_img_gt'].to(device=device)
            batch_size = fgt.shape[0]

            one = torch.ones(batch_size, M * N, 1).to(device=device)
            x_img_one = torch.cat([x_img, one], dim=2)
            x_cam_pt = x_cam_gt.permute(0, 1, 3,
                                        2).reshape(batch_size, 6800, 3)
            x = x_img.permute(0, 2, 1).reshape(batch_size, 2, M, N)

            ptsI = x_img_one.reshape(batch_size, M, N,
                                     3).permute(0, 1, 3, 2)[:, :, :2, :]

            f = calib_net(x)
            f = f + 300
            K = torch.zeros((batch_size, 3, 3)).float().to(device=device)
            K[:, 0, 0] = f.squeeze()
            K[:, 1, 1] = f.squeeze()
            K[:, 2, 2] = 1

            # ground truth l1 error
            opt1.zero_grad()
            f_error = torch.mean(torch.abs(f - fgt))
            f_error.backward()
            opt1.step()

            print(
                f"f/fgt: {f[0].item():.1f}/{fgt[0].item():.1f} | f/fgt: {f[1].item():.1f}/{fgt[1].item():.1f} | f/fgt: {f[2].item():.1f}/{fgt[2].item():.1f} | f/fgt: {f[3].item():.1f}/{fgt[3].item():.1f} "
            )
            continue  # NOTE: skips the dual-optimization block below; only the focal-length L1 step above runs

            # dual optimization
            for outerloop in itertools.count():
                # calibration
                shape = shape.detach()
                for iter in itertools.count():
                    opt1.zero_grad()
                    f, _, _ = calib_net(x)
                    f = f + 300
                    K = torch.zeros(
                        (batch_size, 3, 3)).float().to(device=device)
                    K[:, 0, 0] = f.squeeze()
                    K[:, 1, 1] = f.squeeze()
                    K[:, 2, 2] = 1

                    # ground truth l1 error
                    f_error = torch.mean(torch.abs(f - fgt))

                    # differentiable PnP pose estimation
                    error1 = []
                    for i in range(batch_size):
                        km, c_w, scaled_betas, alphas = util.EPnP(
                            ptsI[i], shape[i], K[i])
                        Xc, R, T, mask = util.optimizeGN(
                            km, c_w, scaled_betas, alphas, shape[i], ptsI[i],
                            K[i])
                        error2d = util.getReprojError2(ptsI[i],
                                                       shape[i],
                                                       R,
                                                       T,
                                                       K[i],
                                                       show=False,
                                                       loss='l1')
                        error1.append(error2d.mean())

                    # batched loss
                    #loss1 = torch.stack(error1).mean() + f_error
                    loss1 = f_error

                    # stopping condition
                    if iter > 10 and prev_loss < loss1: break
                    else: prev_loss = loss1

                    # optimize network
                    loss1.backward()
                    opt1.step()
                    print(
                        f"iter: {iter} | error: {loss1.item():.3f} | f/fgt: {f[0].item():.1f}/{fgt[0].item():.1f} | f/fgt: {f[1].item():.1f}/{fgt[1].item():.1f} | f/fgt: {f[2].item():.1f}/{fgt[2].item():.1f} | f/fgt: {f[3].item():.1f}/{fgt[3].item():.1f} "
                    )

                # structure from motion
                f = f.detach()
                for iter in itertools.count():
                    opt2.zero_grad()

                    betas, _, _ = sfm_net(x)
                    betas = betas.unsqueeze(-1)
                    shape = mu_lm + torch.bmm(
                        lm_eigenvec, betas).squeeze().view(batch_size, N, 3)

                    K = torch.zeros(
                        (batch_size, 3, 3)).float().to(device=device)
                    K[:, 0, 0] = f.squeeze()
                    K[:, 1, 1] = f.squeeze()
                    K[:, 2, 2] = 1

                    # ground truth shape error
                    error3d = torch.mean(torch.abs(shape - shape_gt))

                    # differentiable PnP pose estimation
                    error2 = []
                    for i in range(batch_size):
                        km, c_w, scaled_betas, alphas = util.EPnP(
                            ptsI[i], shape[i], K[i])
                        Xc, R, T, mask = util.optimizeGN(
                            km, c_w, scaled_betas, alphas, shape[i], ptsI[i],
                            K[i])
                        error2d = util.getReprojError2(ptsI[i],
                                                       shape[i],
                                                       R,
                                                       T,
                                                       K[i],
                                                       show=False,
                                                       loss='l1')
                        error2.append(error2d.mean())

                    # batched loss
                    loss2 = torch.stack(error2).mean() + error3d

                    # stopping condition
                    if iter > 10 and prev_loss < loss2: break
                    else: prev_loss = loss2

                    # optimize network
                    loss2.backward()
                    opt2.step()
                    print(
                        f"iter: {iter} | error: {loss2.item():.3f} | f/fgt: {f[0].item():.1f}/{fgt[0].item():.1f}"
                    )

                # outerloop stopping condition
                if outerloop == 1: break

            # get errors
            rmse = torch.mean(torch.abs(shape - shape_gt))
            f_error = torch.mean(torch.abs(fgt - f) / fgt)

            # get shape error from image projection
            print(
                f"f/fgt: {f[0].item():.3f}/{fgt[0].item():.3f} | rmse: {rmse:.3f} | f_rel: {f_error.item():.4f}  | loss1: {loss1.item():.3f} | loss2: {loss2.item():.3f}"
            )

        # save model and increment weight decay
        print("saving!")
        torch.save(sfm_net.state_dict(),
                   os.path.join('model', 'sfm_' + modelout))
        torch.save(calib_net.state_dict(),
                   os.path.join('model', 'calib_' + modelout))
Example #8
def test_sfm(modelin=args.model, outfile=args.out, optimize=args.opt):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)
    sfm_net = CalibrationNet3(n=199)
    calib_path = os.path.join('model', 'calib_' + modelin)
    sfm_path = os.path.join('model', 'sfm_' + modelin)

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_fpred = []
    all_depth = []
    out_shape = []
    out_f = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i * 100 for i in range(4, 15)]
    for f_test in f_vals:
        # create dataloader
        #f_test = 1000
        loader = dataloader.TestLoader(f_test)

        f_pred = []
        shape_pred = []
        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        training_pred = np.zeros((10, 100, 68, 3))
        training_gt = np.zeros((10, 100, 68, 3))

        for j, data in enumerate(loader):
            if j == 10: break
            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']

            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            x = ptsI.unsqueeze(0).permute(0, 2, 1, 3)

            # test camera calibration
            #calib_net.load_state_dict(torch.load(calib_path))
            opt2 = torch.optim.Adam(sfm_net.parameters(), lr=1e-5)
            sfm_net.eval()
            trainfc(sfm_net)
            f = 2000
            for iter in itertools.count():
                opt2.zero_grad()

                # shape prediction
                betas = sfm_net.forward2(x)
                betas = torch.clamp(betas, -20, 20)
                shape = torch.sum(betas * lm_eigenvec, 1)
                shape = shape.reshape(68, 3) + mu_lm
                shape = shape - shape.mean(0).unsqueeze(0)

                rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()
                K = torch.zeros((3, 3)).float()
                K[0, 0] = f
                K[1, 1] = f
                K[2, 2] = 1
                km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                                 shape, ptsI, K)
                error2d = util.getReprojError2(ptsI,
                                               shape,
                                               R,
                                               T,
                                               K,
                                               show=False,
                                               loss='l2')
                error_time = util.getTimeConsistency(shape, R, T)

                loss = error2d.mean() + 0.01 * error_time
                loss.backward()
                opt2.step()
                print(
                    f"iter: {iter} | error: {loss.item():.3f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse.item():.3f} "
                )

                if iter == 100: break
                training_pred[j, iter, :, :] = shape.detach().cpu().numpy()
                training_gt[j, iter, :, :] = shape_gt.detach().cpu().numpy()

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI,
                                                  shape,
                                                  R,
                                                  T,
                                                  K,
                                                  show=False)
            reproj_errors3 = torch.norm(shape_gt - shape, dim=1).mean()
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            # save final prediction
            shape_pred.append(shape.detach().cpu().numpy())

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())
            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(
                f"f/sequence: {f_test}/{j}  | f/fgt: {f:.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        out_shape.append(np.stack(shape_pred, axis=0))
        print(
            f"f_error_rel: {avg_relf:.4f}  | rel rmse: {avg_rel3d:.4f}    | 2d error: {reproj_error.item():.4f} |  rmse: {avg_3d:.4f}  |"
        )

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['training_pred'] = training_pred
    matdata['training_gt'] = training_gt
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #9
def test(modelin=args.model,outfile=args.out,feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [400 + i*100 for i in range(4)]

    # set random seed for reproducibility of test set
    np.random.seed(0)
    for f_test in f_vals:
        f_test = 1400
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            # create a model and optimizer for it
            # unused leftover (as written it would crash: `.requires_grad()` is not callable
            # and `theta` is undefined); the Adam optimizer created below is the one actually used
            #theta1 = 1.1 * torch.randn(4)
            #optimizer = torch.optim.SGD([theta1], lr=0.00001)

            model2 = Model1(k=199,feature_transform=False)
            model2.apply(util.init_weights)
            model = Model1(k=1, feature_transform=False)
            model.apply(util.init_weights)
            optimizer = torch.optim.Adam(list(model.parameters()) + list(model2.parameters()),lr=1)

            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']
            all_depth.append(np.mean(T_gt[:,2]))
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
            x2d = x_img.view((M,N,2))
            x_img_pts = x_img.reshape((M,N,2)).permute(0,2,1)
            one = torch.ones(M*N,1)
            x_img_one = torch.cat([x_img,one],dim=1)
            x = x_img_one.permute(1,0)

            ini_pose = torch.zeros((M,6))
            ini_pose[:,5] = 99
            pre_loss = 99
            for iter in itertools.count():
                optimizer.zero_grad()

                # shape prediction
                betas,_,_ = model2(x.unsqueeze(0))
                shape = torch.sum(betas * lm_eigenvec,1)
                shape = shape.reshape(68,3) + mu_lm
                #shape = shape_gt

                # RMSE between GT and predicted shape
                rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()

                # focal length prediction
                f,_,_ = model(x.unsqueeze(0))
                f = f + 300
                K = torch.zeros((3,3)).float()
                K[0,0] = f
                K[1,1] = f
                K[2,2] = 1

                # differentiable PnP pose estimation
                pose = bpnp(x2d,shape,K,ini_pose)
                pred = BPnP.batch_project(pose,shape,K)

                # loss
                #loss = torch.mean(torch.abs(pred - x2d))
                loss = torch.mean(torch.norm(pred - x2d,dim=2))

                loss.backward()
                optimizer.step()
                print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}")
                if iter == 200: break
                ini_pose = pose.detach()

            # get errors
            km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K)
            reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T)
            rel_errors =  util.getRelReprojError3(x_cam_gt,shape,R,T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(f"f/sequence: {f_test}/{j}  | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for
        break

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #10
def test(modelin=args.model,
         outfile=args.out,
         feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    shape = mu_lm.detach()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i * 100 for i in range(4, 21)]
    np.random.seed(0)
    for f_test in f_vals:
        f_test = 1200
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            # create a model and optimizer for it
            #model2 = Model1(k=199,feature_transform=False)
            #model2.apply(util.init_weights)
            model = Model1(k=1, feature_transform=False)
            model.apply(util.init_weights)
            optimizer = torch.optim.Adam(model.parameters(), lr=2e-1)

            #data = loader[67]
            x_cam_gt = data['x_cam_gt']
            shape = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']
            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(fgt.numpy()[0])

            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            one = torch.ones(M * N, 1)
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3)
            x = x_img_one.permute(1, 0)

            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            for iter in itertools.count():
                optimizer.zero_grad()

                #betas,_,_ = model2(x.unsqueeze(0))
                #shape = torch.sum(betas * lm_eigenvec,1)
                #shape = shape.reshape(68,3) + mu_lm

                f, _, _ = model(x.unsqueeze(0))
                #f = f + 300
                #f = (torch.nn.functional.tanh(f)+1)*850 + 300
                f = f + 300
                #f = torch.nn.functional.sigmoid(f)
                K = torch.zeros((3, 3)).float()
                K[0, 0] = f
                K[1, 1] = f
                K[2, 2] = 1

                # differentiable pose estimation
                km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                                 shape, ptsI, K)
                error2d = util.getReprojError2(ptsI,
                                               shape,
                                               R,
                                               T,
                                               K,
                                               show=False,
                                               loss='l1')
                loss = error2d.mean()
                loss.backward()
                # guard against NaN gradients (x != x is true only for NaN entries)
                if torch.any(model.fc2.weight.grad != model.fc2.weight.grad):
                    print("oh oh something broke")
                    break
                optimizer.step()
                print(
                    f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f}"
                )
                if iter == 200: break

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K)
            reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T)
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(
                f"f/sequence: {f_test}/{j}  | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for
        break

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Ejemplo n.º 11
0
def test(modelin=args.model,
         outfile=args.out,
         feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    shape = mu_lm.detach()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i * 100 for i in range(4, 21)]
    for f_test in f_vals:
        f_test = 1400  # override: evaluate only f=1400 (the loop exits after one pass via the break below)
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            # create a model and optimizer for it
            model = Model2(k=1, feature_transform=False)
            model.apply(util.init_weights)
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)

            M = loader.M
            N = loader.N

            # load the data
            T_gt = data['T_gt']
            x_cam_gt = data['x_cam_gt']
            x_w_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']

            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            one = torch.ones(M * N, 1)
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3)

            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(fgt.numpy()[0])

            # create the input
            b = 10
            x = x_img_one.reshape(M, N,
                                  3).reshape(b, M // b, N,
                                             3).reshape(b, M // b * N, 3)
            x = x.permute(0, 2, 1)
            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            # optimize using EPNP+GN
            fvals = []
            errors = []
            for iter in itertools.count():
                optimizer.zero_grad()

                f, _, _ = model(x)
                #f = f + 1000
                f = torch.nn.functional.leaky_relu(f) + 300
                K = torch.zeros((b, 3, 3)).float()
                K[:, 0, 0] = f.squeeze()
                K[:, 1, 1] = f.squeeze()
                K[:, 2, 2] = 1

                # differentiable pose estimation
                losses = []
                for i in range(b):
                    # each chunk pairs M // b consecutive views with its intrinsics K[i]
                    s, e = i * (M // b), (i + 1) * (M // b)
                    km, c_w, scaled_betas, alphas = util.EPnP(
                        ptsI[s:e], shape, K[i])
                    Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas,
                                                  alphas, shape, ptsI[s:e],
                                                  K[i])
                    error2d = util.getReprojError2(ptsI[s:e], shape, R, T,
                                                   K[i]).mean()
                    losses.append(error2d)
                loss = torch.stack(losses).mean()
                loss.backward()
                optimizer.step()
                print(
                    f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.mean().item():.1f}/{fgt[0].item():.1f}"
                )
                if iter == 100: break

            # get overall poses
            f = f.mean()
            K = torch.zeros((3, 3)).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas, alphas, shape,
                                          ptsI, K)

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K)
            reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T)
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            allerror_3d.append(reproj_error.data.numpy())
            allerror_2d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(
                f"f/sequence: {f_test}/{j}  | f/fgt: {f.item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for
        break

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Ejemplo n.º 12
0
def train(modelin=args.model,
          modelout=args.out,
          log=args.log,
          logname=args.logname):
    # define logger
    if log:
        logger = Logger(logname)

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #torch.manual_seed(2)
    model = Model1(k=1, feature_transform=True)
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model  #.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)

    # dataloader
    #data = dataloader.Data()
    #loader = data.batchloader
    loader = dataloader.SyntheticLoader()

    # mean shape and eigenvectors for 3dmm
    mu_lm = torch.from_numpy(loader.mu_lm).float()  #.cuda()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    shape = mu_lm.detach()  #.cuda()
    lm_eigenvec = torch.from_numpy(loader.lm_eigenvec).float()  #.cuda()

    M = loader.M
    N = loader.N

    # main training loop
    for epoch in itertools.count():
        #for j,batch in enumerate(loader):

        #np.random.seed(0)
        for j, data in enumerate(loader):

            # get the input and gt values
            x_cam_gt = data['x_cam_gt']  #.cuda()
            x_w_gt = data['x_w_gt']  #.cuda()
            fgt = data['f_gt']  #.cuda()
            x_img = data['x_img']  #.cuda()
            x_img_gt = data['x_img_gt']  #.cuda()

            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            one = torch.ones(M * N, 1)  #.cuda()
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3)

            # run the model
            x = x_img_one.permute(1, 0)

            # get initial values for betas and alphas of EPNP
            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            #km, c_w, scaled_betas, alphas = util.EPnP(ptsI,shape,K)
            #Xc,R,T, scaled_betas = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
            #loss = util.getReprojError2(ptsI,shape,R,T,K).mean()

            # optimize using EPnP+GN; note this inner loop has no break, so it keeps refining the first sequence indefinitely
            fvals = []
            errors = []
            for iter in itertools.count():
                optimizer.zero_grad()

                # model output
                f, _, _ = model(x.unsqueeze(0))
                f = torch.nn.functional.leaky_relu(f) + 300
                K = torch.zeros((3, 3)).float()
                K[0, 0] = f
                K[1, 1] = f
                K[2, 2] = 1

                # differentiable pose estimation
                km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                Xc, R, T, _ = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                              shape, ptsI, K)
                error2d = util.getReprojError2(ptsI,
                                               shape,
                                               R,
                                               T,
                                               K,
                                               show=False).mean()
                errorT = util.getTConsistency(T)
                errorR = util.getRConsistency(R)
                #error3dconsistency = util.get3DConsistency(ptsI,shape,kinv,R,T)
                #loss = error2d + errorT*0.001 + errorR
                loss = error2d
                loss.backward()
                optimizer.step()

                errors.append(loss.detach().cpu().item())
                fvals.append(f.detach().cpu().item())

                data = {}
                data['ptsI'] = ptsI.detach().cpu().numpy()
                data['shape'] = shape.detach().cpu().numpy()
                data['R'] = R.detach().cpu().numpy()
                data['T'] = T.detach().cpu().numpy()
                data['Xc'] = Xc.detach().cpu().numpy()
                data['K'] = K.detach().cpu().numpy()
                data['fvals'] = np.array(fvals)
                data['loss'] = np.array(errors)
                scipy.io.savemat(f"visual/shape{iter:03d}.mat", data)

                print(
                    f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f}"
                )

        # save model checkpoint
        print("saving!")
        torch.save(model.state_dict(), modelout)
Ejemplo n.º 13
0
def test(modelin=args.model, outfile=args.out, optimize=args.opt):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = PointNet(n=1)
    sfm_net = PointNet(n=199)
    if modelin != "":
        calib_path = os.path.join('model', 'calib_' + modelin)
        sfm_path = os.path.join('model', 'sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    Mvals = [i for i in range(1, 100)]
    Nvals = [i for i in range(3, 68)]
    f_vals = [i * 200 for i in range(2, 7)]

    fpred_mean = np.zeros((100, 65, 5, 5))
    fpred_med = np.zeros((100, 65, 5, 5))
    fpred = np.zeros((100, 65, 5, 5))
    factual = np.zeros((100, 65, 5, 5))
    depth_error = np.zeros((100, 65, 5, 5))

    for i, viewcount in enumerate(Mvals):
        for j, ptcount in enumerate(Nvals):
            for l, ftest in enumerate(f_vals):
                data3dmm = dataloader.MNLoader(M=viewcount,
                                               N=ptcount,
                                               f=ftest,
                                               seed=0)
                M = data3dmm.M
                N = data3dmm.N

                # mean shape and eigenvectors for 3dmm
                mu_s = torch.from_numpy(data3dmm.mu_s).float().detach()
                mu_s[:, 2] = mu_s[:, 2] * -1
                lm_eigenvec = torch.from_numpy(
                    data3dmm.lm_eigenvec).float().detach()
                sigma = torch.from_numpy(data3dmm.sigma).float().detach()
                sigma = torch.diag(sigma.squeeze())
                lm_eigenvec = torch.mm(lm_eigenvec, sigma)

                for k in range(5):
                    data = data3dmm[k]

                    # load the data
                    x_cam_gt = data['x_cam_gt']
                    shape_gt = data['x_w_gt']
                    fgt = data['f_gt']
                    x_img = data['x_img']
                    x_img_gt = data['x_img_gt']

                    ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)
                    x = x_img.unsqueeze(0).permute(0, 2, 1)

                    # run the model
                    f = torch.squeeze(calib_net(ptsI) + 300)
                    betas = sfm_net(ptsI)
                    betas = betas.unsqueeze(-1)
                    eigenvec = torch.stack(M * [lm_eigenvec])
                    shape = torch.stack(M * [mu_s]) + torch.bmm(
                        eigenvec, betas).squeeze().view(M, N, 3)
                    shape = shape - shape.mean(1).unsqueeze(1)
                    shape = shape.mean(0)

                    # get motion measurement guess
                    K = torch.zeros((M, 3, 3)).float()
                    K[:, 0, 0] = f
                    K[:, 1, 1] = f
                    K[:, 2, 2] = 1
                    km, c_w, scaled_betas, alphas = util.EPnP_single(
                        ptsI, shape, K)
                    _, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                    alphas, shape, ptsI)
                    error_time = util.getTimeConsistency(shape, R, T)
                    if error_time > 20:
                        mode = 'walk'
                    else:
                        mode = 'still'

                    # apply dual optimization
                    if optimize:
                        calib_net.load_state_dict(torch.load(calib_path))
                        sfm_net.load_state_dict(torch.load(sfm_path))
                        shape, K, R, T = dualoptimization(x,
                                                          calib_net,
                                                          sfm_net,
                                                          lm_eigenvec,
                                                          betas,
                                                          mu_s,
                                                          shape_gt=shape_gt,
                                                          fgt=fgt,
                                                          M=M,
                                                          N=N,
                                                          mode=mode)
                        f = K[0, 0].detach()

                    # get motion measurement guess
                    fmu = f.mean()
                    fmed = f.flatten().median()
                    K = torch.zeros(M, 3, 3).float()
                    K[:, 0, 0] = fmu
                    K[:, 1, 1] = fmu
                    K[:, 2, 2] = 1
                    K, _ = torch.median(K, dim=0)
                    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                     alphas, shape, ptsI)

                    # get errors
                    rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

                    fpred_mean[i, j, l, k] = fmu.detach().cpu().item()
                    fpred_med[i, j, l, k] = fmed.detach().cpu().item()
                    factual[i, j, l, k] = fgt.detach().cpu().item()
                    depth_error[i, j, l, k] = rel_errors.cpu().mean().item()
                    print(
                        f"M: {viewcount} | N: {ptcount} | f/fgt: {fpred_mean[i,j,l,k]:.2f}/{factual[i,j,l,k]}"
                    )

                ferror_mu = np.mean(
                    np.abs(fpred_mean[i, j, l] - factual[i, j, l]) /
                    factual[i, j, l])
                ferror_med = np.mean(
                    np.abs(fpred_med[i, j, l] - factual[i, j, l]) /
                    factual[i, j, l])
                derror = np.mean(depth_error[i, j, l])
                f = np.mean(fpred_mean[i, j, l])
                print(
                    f"M: {viewcount} | N: {ptcount} | fgt: {ftest:.2f} | ferror_mu: {ferror_mu:.2f} | ferror_med: {ferror_med:.2f} | derror: {derror:.2f}"
                )

    matdata = {}
    matdata['fpred_mu'] = fpred_mean
    matdata['fpred_med'] = fpred_med
    matdata['fgt'] = factual
    matdata['derror'] = depth_error
    scipy.io.savemat(outfile, matdata)

    print(f"saved output to {outfile}")
Ejemplo n.º 14
0
def test(modelin=args.model,outfile=args.out,optimize=args.opt):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)
    sfm_net = CalibrationNet3(n=199)
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path,map_location='cpu'))
        sfm_net.load_state_dict(torch.load(sfm_path,map_location='cpu'))
    calib_net.to(args.device)
    sfm_net.to(args.device)
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().to(args.device).detach()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().to(args.device).detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().to(args.device).detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    batch_size = 10
    lm_eigenvec = torch.stack(batch_size*[lm_eigenvec])

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_fpred = []
    all_depth = []
    out_shape = []
    out_f = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i*100 for i in range(4,15)]
    for f_test in f_vals:
        f_test = 1000  # override: evaluate only f=1000 for this run

        # create dataloader
        data = dataloader.TestData()
        data.batchsize = batch_size
        loader = data.createLoader(f_test)

        # containers
        f_pred = []
        shape_pred = []
        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = data.batchsize

        for j,data in enumerate(loader):
            # load the data
            x_cam_gt = data['x_cam_gt'].to(args.device)
            shape_gt = data['x_w_gt'].to(args.device)
            fgt = data['f_gt'].to(args.device)
            x_img = data['x_img'].to(args.device)
            x_img_gt = data['x_img_gt'].to(args.device)
            T_gt = data['T_gt'].to(args.device)

            # reshape and form data
            one = torch.ones(batch_size,M*N,1).to(device=args.device)
            x_img_one = torch.cat([x_img,one],dim=2)
            x_cam_pt = x_cam_gt.permute(0,1,3,2).reshape(batch_size,6800,3)
            x = x_img.permute(0,2,1).reshape(batch_size,2,M,N)
            ptsI = x_img_one.reshape(batch_size,M,N,3).permute(0,1,3,2)[:,:,:2,:]

            # run the model
            f = calib_net(x) + 300
            betas = sfm_net(x)
            betas = betas.squeeze(0).unsqueeze(-1)
            shape = mu_lm + torch.bmm(lm_eigenvec,betas).squeeze().view(batch_size,N,3)

            # additional optimization on initial solution
            if optimize:
                calib_net.load_state_dict(torch.load(calib_path,map_location=args.device))
                sfm_net.load_state_dict(torch.load(sfm_path,map_location=args.device))
                calib_net.train()
                sfm_net.train()
                opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
                opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-2)
                curloss = 100
                for outerloop in itertools.count():

                    # camera calibration
                    shape = shape.detach()
                    for iter in itertools.count():
                        opt1.zero_grad()
                        f = torch.mean(calib_net.forward2(x) + 300)
                        K = torch.zeros(3,3).float().to(device=args.device)
                        K[0,0] = f
                        K[1,1] = f
                        K[2,2] = 1

                        # ground truth l1 error
                        f_error = torch.mean(torch.abs(f - fgt))

                        # rmse
                        rmse = torch.norm(shape_gt - shape,dim=2).mean()

                        # differentiable PnP pose estimation
                        error1 = []
                        for i in range(batch_size):
                            km, c_w, scaled_betas, alphas = util.EPnP(ptsI[i],shape[i],K)
                            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape[i],ptsI[i],K)
                            error2d = util.getReprojError2(ptsI[i],shape[i],R,T,K,show=False,loss='l1')
                            error1.append(error2d.mean())

                        # loss
                        loss = torch.stack(error1).mean()

                        # stopping condition
                        if iter == 5: break
                        if iter > 5 and prev_loss < loss:
                            break
                        else:
                            prev_loss = loss

                        # update
                        loss.backward()
                        opt1.step()
                        print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.mean().item():.1f}/{fgt.mean().item():.1f} | error2d: {loss.item():.3f} | rmse: {rmse.item():.3f} ")

                    # sfm
                    f = f.detach()
                    for iter in itertools.count():
                        opt2.zero_grad()

                        # shape prediction
                        betas = sfm_net.forward2(x)
                        betas = betas.unsqueeze(-1)
                        shape = mu_lm + torch.bmm(lm_eigenvec,betas).squeeze().view(batch_size,N,3)
                        K = torch.zeros((3,3)).float()
                        K[0,0] = f
                        K[1,1] = f
                        K[2,2] = 1

                        #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()
                        rmse = torch.norm(shape_gt - shape,dim=2).mean()

                        # differentiable PnP pose estimation
                        error1 = []
                        for i in range(batch_size):
                            km, c_w, scaled_betas, alphas = util.EPnP(ptsI[i],shape[i],K)
                            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape[i],ptsI[i],K)
                            error2d = util.getReprojError2(ptsI[i],shape[i],R,T,K,show=False,loss='l1')
                            errorTime = util.getTimeConsistency(shape[i],R,T)
                            error1.append(error2d.mean())

                        #loss = torch.stack(error1).mean() + 0.01*torch.stack(error2).mean()
                        loss = torch.stack(error1).mean()

                        if iter == 5: break
                        if iter > 5 and prev_loss < loss:
                            break
                        else:
                            prev_loss = loss
                        loss.backward()
                        opt2.step()
                        print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.mean().item():.1f}/{fgt.mean().item():.1f} | error2d: {loss.item():.3f} | rmse: {rmse.item():.3f} ")
                    # closing condition for outerloop on dual objective
                    if torch.abs(curloss - loss) < 0.01: break
                    curloss = loss
            else:
                K = torch.zeros((batch_size,3,3)).float().to(device=args.device)
                K[:,0,0] = f.squeeze()
                K[:,1,1] = f.squeeze()
                K[:,2,2] = 1
                km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

            #all_fpred.append(batch_size*[f.detach().item()])
            e2d,e3d,eshape,e2d_all,e3d_all,d_all = util.getBatchError(ptsI.detach(),shape.detach(),K.detach(),x_cam_gt,shape_gt)
            f_error = torch.squeeze(torch.abs(fgt - f)/fgt)

            e2d = e2d.cpu().numpy()
            e3d = e3d.cpu().numpy()
            eshape = eshape.cpu().numpy()
            f_error = f_error.cpu().squeeze().numpy()
            e2d_all = e2d_all.cpu().numpy()
            e3d_all = e3d_all.cpu().numpy()
            d_all = d_all.cpu().numpy()

            f_pred.append(f.detach().cpu().item())
            shape_pred.append(shape.detach().cpu().numpy())
            all_depth.append(d_all.flatten())
            all_f.append(np.array([fgt.mean()] * d_all.flatten().shape[0]))
            all_fpred.append(np.array([f.mean()]*d_all.flatten().shape[0]))

            print(f"f/sequence: {f_test}/{j}  | f/fgt: {f.mean().item():.3f}/{fgt.mean().item():.3f} |  f_error_rel: {f_error.mean().item():.4f}  | rmse: {eshape.mean().item():.4f}  | rel rmse: {np.mean(e3d):.4f}    | 2d error: {np.mean(e2d):.4f}")

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        out_f.append(np.array(f_pred))
        out_shape.append(np.concatenate(shape_pred,axis=0))
        print(f"f_error_rel: {avg_relf:.4f}  | rel rmse: {avg_rel3d:.4f}    | 2d error: {avg_2d:.4f} |  rmse: {avg_3d:.4f}  |")

    out_shape = np.stack(out_shape)
    out_f = np.stack(out_f)
    all_f = np.stack(all_f).flatten()
    all_fpred = np.stack(all_fpred).flatten()
    all_depth = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_fpred'] = np.array(all_fpred)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    matdata['out_shape'] = out_shape
    matdata['out_f'] = out_f
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Ejemplo n.º 15
0
def dualoptimization(x,
                     calib_net,
                     sfm_net,
                     shape_gt=None,
                     fgt=None,
                     M=100,
                     N=68,
                     mode='still',
                     ptstart=0):

    if mode == 'still':
        alpha = 0.1
    else:
        alpha = 0.001

    ini_pose = torch.zeros((M, 6))
    ini_pose[:, 5] = 99

    # define which weights get optimized
    calib_net.eval()
    sfm_net.eval()
    trainfc(calib_net)
    trainfc(sfm_net)

    x2d = x.squeeze().permute(1, 0).reshape((M, N, 2))
    ptsI = x.squeeze().permute(1, 0).reshape((M, N, 2)).permute(0, 2, 1)
    ptsI = ptsI[:, :, ptstart:]
    x2d = x2d[:, ptstart:, :]

    # run the model
    f = calib_net(x) + 300
    betas = sfm_net(x)
    betas = betas.squeeze(0).unsqueeze(-1)
    shape = mu_lm + torch.mm(lm_eigenvec, betas).squeeze().view(N, 3)
    shape = shape - shape.mean(0).unsqueeze(0)
    shape = shape[ptstart:, :]

    opt1 = torch.optim.Adam(calib_net.parameters(), lr=1e-5)
    opt2 = torch.optim.Adam(sfm_net.parameters(), lr=10)
    curloss = 100
    for outerloop in itertools.count():
        shape = shape.detach()
        for iter in itertools.count():
            opt1.zero_grad()
            f = calib_net(x) + 300
            K = torch.zeros(3, 3).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            # differentiable PnP pose estimation
            pose = bpnp(x2d, shape, K, ini_pose)
            pred = BPnP.batch_project(pose, shape, K)

            # apply loss
            loss = (torch.norm(pred - x2d, dim=-1)).mean()
            if iter >= 5: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if not shape_gt is None:
                d, Z, tform = util.procrustes(shape.detach().numpy(),
                                              shape_gt.detach().numpy())
                rmse = torch.norm(shape_gt - Z, dim=1).mean().detach()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {loss.item():.3f} | rmse: {rmse:.2f}"
            )

        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            betas = sfm_net(x)
            shape = torch.sum(betas * lm_eigenvec, 1)
            shape = shape.reshape(68, 3) + mu_lm
            shape = shape - shape.mean(0).unsqueeze(0)
            shape = shape[ptstart:, :]
            K = torch.zeros((3, 3)).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            # differentiable PnP pose estimation
            pose = bpnp(x2d, shape, K, ini_pose)
            pred = BPnP.batch_project(pose, shape, K)

            # apply loss
            loss = (torch.norm(pred - x2d, dim=-1)).mean()
            if iter >= 5: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if not shape_gt is None:
                d, Z, tform = util.procrustes(shape.detach().numpy(),
                                              shape_gt.detach().numpy())
                rmse = torch.norm(shape_gt - Z, dim=1).mean().detach()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {loss.item():.3f} | rmse: {rmse:.2f}"
            )

        if torch.abs(curloss - loss) <= 0.01 or curloss < loss: break
        curloss = loss

    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas, shape,
                                     ptsI, K)
    return shape, K, R, T
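Both inner loops above rebuild the landmark shape from the 3DMM as the mean shape plus a weighted sum of eigenvectors, then recenter it before pose estimation. Written out for a single coefficient vector (betas of shape (199,), lm_eigenvec of shape (68*3, 199), mu_lm of shape (68, 3)), the reconstruction is simply:

import torch

def shape_from_betas(betas, lm_eigenvec, mu_lm):
    # linear 3DMM: mean landmarks plus eigenvector offsets weighted by betas
    offset = torch.mv(lm_eigenvec, betas).view(-1, 3)
    shape = mu_lm + offset
    # zero-mean the landmark cloud, as done before EPnP/BPnP in these examples
    return shape - shape.mean(0, keepdim=True)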
Ejemplo n.º 16
0
def test(model,
         modelin=args.model,
         outfile=args.out,
         feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.eval()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    shape = mu_lm

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i * 100 for i in range(4, 21)]
    for f_test in f_vals:
        # create dataloader
        data = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for k in range(len(data)):
            batch = data[4]  # note: always evaluates sequence 4 of the set, not the k-th sample
            x_cam_gt = batch['x_cam_gt']
            x_w_gt = batch['x_w_gt']
            f_gt = batch['f_gt']
            x_img = batch['x_img'].unsqueeze(0)
            x_img_gt = batch['x_img_gt']
            T_gt = batch['T_gt']
            sequence = batch['x_img'].reshape((M, N, 2)).permute(0, 2, 1)

            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(f_gt.numpy()[0])

            x = x_img.reshape((batch_size, M, N, 2)).permute(0, 3, 2, 1) / 640
            x_one = torch.cat(
                [x.squeeze().permute(2, 0, 1) * 640,
                 torch.ones(M, 1, N)],
                dim=1)

            # run the model
            out = model(x)
            betas = out[:, :199]
            fout = torch.relu(out[:, 199])
            if torch.any(fout < 1): fout = fout + 1

            # apply 3DMM model from predicted parameters
            alpha_matrix = torch.diag(betas.squeeze())
            shape_cov = torch.mm(lm_eigenvec, alpha_matrix)
            s = shape_cov.sum(1).view(68, 3)
            #shape = (mu_lm + s)
            #shape = mu_lm
            #shape[:,2] = shape[:,2]*-1

            # create variables and optimizer for variables as SGD
            # run epnp using predicted shape and intrinsics
            varf = Variable(fout, requires_grad=True)
            K = torch.zeros((3, 3))
            K[0, 0] = varf
            K[1, 1] = varf
            K[2, 2] = 1
            K[0, 2] = 0
            K[1, 2] = 0
            Xc, R, T = util.EPnP(sequence, shape, K)
            tmpT = T.detach()
            tmpR = R.detach()
            varR = Variable(R, requires_grad=True)
            varT = Variable(T, requires_grad=True)
            optimizer = torch.optim.Adam([varR, varT], lr=1e-1)

            # optimize results for image consistency
            ferror = []
            losses = []
            minerror = 10000
            convergence = 0
            for iter in itertools.count():
                K = torch.zeros((3, 3))
                K[0, 0] = varf
                K[1, 1] = varf
                K[2, 2] = 1
                K[0, 2] = 0
                K[1, 2] = 0

                R = varR
                T = varT
                Xc, _, _ = util.EPnP(sequence, shape, K)
                #Xc,R,T = util.EPnP(sequence,shape,K)
                optimizer.zero_grad()

                # k inverse
                kinv = torch.zeros(3, 3).float()
                kinv[0, 0] = 1 / varf
                kinv[1, 1] = 1 / varf
                kinv[2, 2] = 1

                # get errors
                reproj_errors2 = util.getReprojError2(sequence, shape, R, T, K)
                #reproj_errors3 = util.getReprojError3(x_cam_gt,shape,varR,varT)
                error_3d = util.getRelReprojError3(x_cam_gt, shape, R,
                                                   T).mean()
                #error_3d = util.getPCError(x_cam_gt,x_one.permute(0,2,1),torch.stack(100*[kinv]),mode='l2')

                error_Rconsistency = util.getRConsistency(R)
                error_Tconsistency = util.getTConsistency(T) * 0.001
                error_3dconsistency = util.get3DConsistency(
                    sequence, shape, kinv, R, T)
                reproj_error = torch.mean(reproj_errors2)

                # determine convergence
                loss = error_3dconsistency
                if loss < minerror:
                    minerror = loss
                    minf = varf.item()
                    minR = R
                    minT = T
                    convergence = 0
                else:
                    convergence += 1

                loss.backward()
                optimizer.step()

                f = util.solvef(sequence, Xc.detach())
                print(f)
                #if varf < 0: varf = varf*-1
                delta = K[0, 0] - varf
                direction = torch.sign(delta)
                error_f = torch.abs(varf - f_gt) / f_gt
                ferror.append(error_f.item())
                losses.append(loss.item())

                print(
                    f"iter: {iter} | loss: {loss.item():.3f} | f/fgt: {varf.item():.3f}/{f_gt.item():.3f} | 2d error: {reproj_error.item():.3f} | error R: {error_Rconsistency.item():.3f} | error T: {error_Tconsistency.item():.3f} | error 3d: {error_3dconsistency.item():.3f} | GT RMSE: {error_3d.item():.3f} | delta: {delta.item():.3f}"
                )
                if convergence == 100: break

            data = {'ferror': np.array(ferror), 'loss': np.array(losses)}
            scipy.io.savemat("optimizationlr1.mat", data)
            # stop after saving the optimization trace; the aggregation code below is unreachable
            quit()

            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(f_gt - fout) / f_gt

            allerror_3d.append(reproj_error.data.numpy())
            allerror_2d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(
                f"f/sequence: {f_test}/{k}  | f/fgt: {fout[0].item():.3f}/{f_gt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Ejemplo n.º 17
0
def dualoptimization(x,calib_net,sfm_net,shape_gt=None,fgt=None,M=100,N=68):

    # define which weights get optimized
    calib_net.eval()
    sfm_net.eval()
    trainfc(calib_net)
    trainfc(sfm_net)

    ptsI = x.squeeze().permute(1,0).reshape((M,N,2)).permute(0,2,1)
    ptsI = ptsI[:,:,ptstart:]
    # run the model
    f = calib_net(x) + 300
    betas = sfm_net(x)
    betas = betas.squeeze(0).unsqueeze(-1)
    shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)
    shape = shape - shape.mean(0).unsqueeze(0)
    shape = shape[ptstart:,:]

    opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
    opt2 = torch.optim.Adam(sfm_net.parameters(),lr=10)
    curloss = 100
    for outerloop in itertools.count():
        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            betas = sfm_net(x)
            shape = torch.sum(betas * lm_eigenvec,1)
            shape = shape.reshape(68,3) + mu_lm
            shape = shape - shape.mean(0).unsqueeze(0)
            shape = shape[ptstart:,:]
            K = torch.zeros((3,3)).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # differentiable PnP pose estimation
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
            error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
            error_time = util.getTimeConsistency(shape,R,T)

            # apply loss
            loss = error2d.mean() + 0.01*error_time
            if iter >= 5 and loss > prv_loss: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape,dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse:.2f}")

        shape = shape.detach()
        for iter in itertools.count():
            opt1.zero_grad()
            f = calib_net(x) + 300
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # pose estimation
            km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
            error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l1')
            error_time = util.getTimeConsistency(shape,R,T)

            # apply loss
            loss = error2d.mean() + 0.01*error_time
            if iter >= 5 and loss > prv_loss: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape,dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse:.2f}")

        if torch.abs(curloss  - loss) <= 0.01 or curloss < loss: break
        curloss = loss
    return shape,K,R,T
Ejemplo n.º 18
0
def dualoptimization(ptsI,
                     calib_net,
                     sfm_net,
                     shape_gt=None,
                     fgt=None,
                     M=100,
                     N=68,
                     mode='still',
                     ptstart=0):

    if mode == 'still':
        alpha = 0.1
    else:
        alpha = 0.001

    # define which weights get optimized
    calib_net.eval()
    sfm_net.eval()
    trainfc(calib_net)
    trainfc(sfm_net)
    M, _, N = ptsI.shape

    # run the model
    f = calib_net(ptsI) + 300
    f = f.mean()
    betas = sfm_net(ptsI)
    betas = betas.unsqueeze(-1)
    eigenvec = torch.stack(M * [lm_eigenvec])
    shape = torch.stack(M * [mu_lm]) + torch.bmm(
        eigenvec, betas).squeeze().view(M, N, 3)
    shape = shape - shape.mean(1).unsqueeze(1)
    shape = shape.mean(0)

    opt1 = torch.optim.Adam(calib_net.parameters(), lr=1e-5)
    opt2 = torch.optim.Adam(sfm_net.parameters(), lr=1)
    curloss = 100
    for outerloop in itertools.count():
        shape = shape.detach()
        for iter in itertools.count():
            opt1.zero_grad()
            f = calib_net(ptsI) + 300
            f = f.mean()
            K = torch.zeros(3, 3).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            # pose estimation
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            _, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                            shape, ptsI, K)
            Xc = torch.bmm(R, torch.stack(M * [shape.T])) + T.unsqueeze(2)
            #shape_error = util.getShapeError(ptsI,Xc,shape,f,R,T)
            error_time = util.getTimeConsistency(shape, R, T)
            error2d = util.getReprojError2(ptsI,
                                           shape,
                                           R,
                                           T,
                                           K,
                                           show=False,
                                           loss='l2')

            # apply loss
            loss = error2d.mean()
            #loss = error2d.mean() + alpha*error_time
            if iter >= 5: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape, dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | error_time: {error_time.item():.2f} | rmse: {rmse:.2f}"
            )

        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            betas = sfm_net(ptsI)
            betas = betas.unsqueeze(-1)
            eigenvec = torch.stack(M * [lm_eigenvec])
            shape = torch.stack(M * [mu_lm]) + torch.bmm(
                eigenvec, betas).squeeze().view(M, N, 3)
            shape = shape - shape.mean(1).unsqueeze(1)
            shape = shape.mean(0)
            K = torch.zeros((3, 3)).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            # differentiable PnP pose estimation
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            _, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                            shape, ptsI, K)
            error2d = util.getReprojError2(ptsI,
                                           shape,
                                           R,
                                           T,
                                           K,
                                           show=False,
                                           loss='l2')
            Xc = torch.bmm(R, torch.stack(M * [shape.T])) + T.unsqueeze(2)
            #shape_error = util.getShapeError(ptsI,Xc,shape,f,R,T)
            error_time = util.getTimeConsistency(shape, R, T)

            # apply loss
            loss = error2d.mean()
            #loss = error2d.mean() + alpha*error_time
            #if iter >= 5 and loss > prv_loss: break
            if iter >= 5: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape, dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | error_time: {error_time.item():.2f} | rmse: {rmse:.2f}"
            )

        if torch.abs(curloss - loss) <= 0.01 or curloss < loss: break
        curloss = loss
    return shape, K, R, T
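Ejemplo 18 predicts one set of 3DMM coefficients per view, reconstructs a per-view shape, recenters each, and averages over views to obtain a single landmark model. A compact sketch of that averaging, with the tensor shapes stated as assumptions:

import torch

def mean_multiview_shape(betas, lm_eigenvec, mu_lm):
    # betas: (M, 199, 1) per-view coefficients; lm_eigenvec: (N*3, 199); mu_lm: (N, 3)
    M = betas.shape[0]
    N = mu_lm.shape[0]
    eigenvec = torch.stack(M * [lm_eigenvec])                       # (M, N*3, 199)
    offsets = torch.bmm(eigenvec, betas).squeeze(-1).view(M, N, 3)  # per-view offsets
    shape = torch.stack(M * [mu_lm]) + offsets
    shape = shape - shape.mean(1, keepdim=True)                     # recenter each view
    return shape.mean(0)                                            # consensus shape over views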
Ejemplo n.º 19
0
def dualoptimization(x,
                     calib_net,
                     sfm_net,
                     shape_gt=None,
                     fgt=None,
                     M=100,
                     N=68,
                     mode='still',
                     ptstart=0):
    alphaModel = PointNetSmall(n=1)
    for module in alphaModel.modules():
        if isinstance(module, torch.nn.modules.BatchNorm1d):
            module.eval()
        if isinstance(module, torch.nn.modules.BatchNorm2d):
            module.eval()
        if isinstance(module, torch.nn.modules.BatchNorm3d):
            module.eval()

    # define which weights get optimized
    calib_net.eval()
    sfm_net.eval()
    trainfc(calib_net)
    trainfc(sfm_net)

    ptsI = x.squeeze().permute(1, 0).reshape((M, N, 2)).permute(0, 2, 1)
    ptsI = ptsI[:, :, ptstart:]
    # run the model
    f = calib_net(x) + 300
    betas = sfm_net(x)
    betas = betas.squeeze(0).unsqueeze(-1)
    shape = mu_lm + torch.mm(lm_eigenvec, betas).squeeze().view(N, 3)
    shape = shape - shape.mean(0).unsqueeze(0)
    shape = shape[ptstart:, :]

    opt1 = torch.optim.Adam(list(calib_net.parameters()) +
                            list(alphaModel.parameters()),
                            lr=1e-5)
    opt2 = torch.optim.Adam(list(sfm_net.parameters()) +
                            list(alphaModel.parameters()),
                            lr=1)
    curloss = 100
    for outerloop in itertools.count():
        shape = shape.detach()
        for iter in itertools.count():
            opt1.zero_grad()
            f = calib_net(x) + 300
            K = torch.zeros(3, 3).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            alpha = alphaModel(x)

            # pose estimation
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            _, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                            shape, ptsI, K)
            Xc = torch.bmm(R, torch.stack(M * [shape.T])) + T.unsqueeze(2)
            shape_error = util.getShapeError(ptsI, Xc, shape, f, R, T)
            error_time = util.getTimeConsistency(shape, R, T)
            error2d = util.getReprojError2(ptsI,
                                           shape,
                                           R,
                                           T,
                                           K,
                                           show=False,
                                           loss='l2')

            # apply loss
            loss = error2d.mean() + alpha * error_time - torch.log(alpha)
            if iter >= 5: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape, dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | shape_error: {shape_error.item():.3f} | rmse: {rmse:.2f} | alpha: {alpha.item():.4f} | error_time: {error_time:.2f}"
            )

        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            betas = sfm_net(x)
            shape = torch.sum(betas * lm_eigenvec, 1)
            shape = shape.reshape(68, 3) + mu_lm
            shape = shape - shape.mean(0).unsqueeze(0)
            shape = shape[ptstart:, :]
            K = torch.zeros((3, 3)).float()
            K[0, 0] = f
            K[1, 1] = f
            K[2, 2] = 1

            alpha = alphaModel(x)

            # differentiable PnP pose estimation
            km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
            _, R, T, mask = util.optimizeGN(km, c_w, scaled_betas, alphas,
                                            shape, ptsI, K)
            error2d = util.getReprojError2(ptsI,
                                           shape,
                                           R,
                                           T,
                                           K,
                                           show=False,
                                           loss='l2')
            Xc = torch.bmm(R, torch.stack(M * [shape.T])) + T.unsqueeze(2)
            shape_error = util.getShapeError(ptsI, Xc, shape, f, R, T)
            error_time = util.getTimeConsistency(shape, R, T)

            # apply loss
            #loss = error2d.mean()
            loss = error2d.mean() + alpha * error_time - torch.log(alpha)
            #if iter >= 5 and loss > prv_loss: break
            if iter >= 5: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if not shape_gt is None:
                rmse = torch.norm(shape_gt - shape, dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f - ftrue))
            print(
                f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | rmse: {rmse:.2f} | alpha: {alpha.item():.4f} | error_time: {error_time:.2f}"
            )

        if torch.abs(curloss - loss) <= 0.01 or curloss < loss: break
        curloss = loss
    return shape, K, R, T
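Ejemplo 19 replaces the fixed weight on the temporal-consistency term with a value predicted by a small network and adds a -log(alpha) penalty so the predicted weight cannot collapse to zero. The composite loss is just:

import torch

def weighted_consistency_loss(error2d, error_time, alpha):
    # error2d: per-view reprojection errors; error_time and alpha: scalar tensors
    # the -log(alpha) term penalizes shrinking alpha, keeping error_time in play
    return error2d.mean() + alpha * error_time - torch.log(alpha)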
Ejemplo n.º 20
0
def test(modelin=args.model,outfile=args.out,feature_transform=args.ft):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    mu_lm[:,2] = mu_lm[:,2]*-1
    le = torch.mean(mu_lm[36:42,:],axis=0)
    re = torch.mean(mu_lm[42:48,:],axis=0)
    ipd = torch.norm(le - re)
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_fpred = []
    all_depth = []
    out_shape = []
    out_f = []
    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i*100 for i in range(4,15)]

    # set random seed for reproducibility of test set
    for f_test in f_vals:
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        f_pred = []
        shape_pred = []
        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            if j == 10: break
            # create bpnp camera calibration model
            calib_net = (1.1*torch.randn(1)).requires_grad_()

            # create bpnp sfm model
            sfm_net = torchvision.models.vgg11()
            sfm_net.classifier = torch.nn.Linear(25088,N*3)

            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']

            depth = torch.norm(x_cam_gt.mean(2),dim=1)
            all_depth.append(depth.numpy())
            all_f.append(fgt.numpy()[0])

            ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
            x_img_pts = x_img.reshape((M,N,2)).permute(0,2,1)
            one = torch.ones(M*N,1)
            x_img_one = torch.cat([x_img,one],dim=1)
            x = x_img_one.permute(1,0)

            # run the model
            f = torch.sigmoid(calib_net)*2000
            shape = mu_lm
            ini_pose = torch.zeros((M,6))
            ini_pose[:,5] = 99
            curloss = 100

            # apply dual optimization
            # x2d was undefined in the original; assume the (M,N,2) image-point layout expected by BPnP
            x2d = x_img.reshape((M,N,2))
            shape,K,R,T = dualoptimization(x,ptsI,x2d,ini_pose,calib_net,sfm_net,shape_gt=shape_gt,fgt=fgt)
            f = K[0,0].detach()
            all_fpred.append(f.item())

            # get errors
            km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

            # get errors
            reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K)
            reproj_errors3 = torch.norm(shape_gt - shape,dim=1).mean()
            rel_errors =  util.getRelReprojError3(x_cam_gt,shape,R,T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            # save final prediction
            f_pred.append(f.detach().cpu().item())
            shape_pred.append(shape.detach().cpu().numpy())

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(f"f/sequence: {f_test}/{j}  | f/fgt: {f.item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        out_f.append(np.stack(f_pred))
        out_shape.append(np.stack(shape_pred,axis=0))
        #end for

    all_f = np.stack(all_f).flatten()
    all_fpred = np.stack(all_fpred).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    #matdata['shape'] = shape.detach().cpu().numpy()
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_fpred'] = np.array(all_fpred)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #21
def testReal(modelin=args.model,outfile=args.out,optimize=args.opt,db=args.db):
    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = PointNet(n=1)
    sfm_net = PointNet(n=199)
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # define loader
    loader = getLoader(db)
    f_pred = []
    shape_pred = []
    error_2d = []
    error_relf = []
    error_rel3d = []
    for sub in range(len(loader)):
        batch = loader[sub]
        x_cam_gt = batch['x_cam_gt']
        fgt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]
        N = x_img_gt.shape[-1]

        ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
        x = x_img.unsqueeze(0).permute(0,2,1)

        # run the model
        f = calib_net(x) + 300
        betas = sfm_net(x)
        betas = betas.squeeze(0).unsqueeze(-1)
        shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)
        shape = shape - shape.mean(0).unsqueeze(0)

        # get motion measurement guess
        K = torch.zeros((3,3)).float()
        K[0,0] = f
        K[1,1] = f
        K[2,2] = 1
        km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
        _, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)
        error_time = util.getTimeConsistency(shape,R,T)
        if error_time > 20:
            mode='walk'
        else:
            mode='still'

        # adjust number of landmarks
        M = x_img_gt.shape[0]
        N = x_img_gt.shape[-1]

        # additional optimization on initial solution
        if optimize:
            calib_net.load_state_dict(torch.load(calib_path))
            sfm_net.load_state_dict(torch.load(sfm_path))
            if db == 'biwi':
                shape_gt = batch['x_w_gt']
                shape,K,R,T = dualoptimization(x,calib_net,sfm_net,shape_gt=shape_gt,fgt=fgt,M=M,N=N,mode=mode,db='biwi')
            else:
                shape,K,R,T = dualoptimization(x,calib_net,sfm_net,fgt=fgt,M=M,N=N,mode=mode)
            f = K[0,0].detach()
        else:
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)

        # get errors
        reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(fgt - f) / fgt

        # save final prediction
        f_pred.append(f.detach().cpu().item())
        shape_pred.append(shape.detach().cpu().numpy())

        error_2d.append(reproj_error.cpu().data.item())
        error_rel3d.append(rel_error.cpu().data.item())
        error_relf.append(f_error.cpu().data.item())

        print(f" f/fgt: {f.item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    # prepare output file
    out_shape = np.stack(shape_pred)
    out_f = np.stack(f_pred)

    matdata = {}
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    matdata['error_2d'] = np.array(error_2d)
    matdata['error_rel3d'] = np.array(error_rel3d)
    matdata['error_relf'] = np.array(error_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(error_2d)}")
    print(f"MEAN seterror_rel3d: {np.mean(error_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(error_relf)}")
Example #22
def dualoptimization(x,ptsI,x2d,ini_pose,calib_net,sfm_net,shape_gt=None,fgt=None,M=100,N=68):

    # run the model
    f = torch.sigmoid(calib_net)*2000
    shape = mu_lm

    opt1 = torch.optim.Adam([calib_net],lr=1e-1)
    opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-4)
    curloss = 10000
    for outerloop in itertools.count():
        shape = shape.detach()
        for iter in itertools.count():
            opt1.zero_grad()
            f = torch.sigmoid(calib_net)*1400
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # differentiable PnP pose estimation
            pose = bpnp(x2d,shape,K,ini_pose)
            pred = BPnP.batch_project(pose,shape,K)

            # apply loss
            loss = (torch.norm(pred - x2d,dim=-1)).mean()
            if iter >= 5: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if shape_gt is not None:
                d,Z,tform = util.procrustes(shape.detach().numpy(),shape_gt.detach().numpy())
                rmse = torch.norm(shape_gt - Z,dim=1).mean().detach()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {loss.item():.3f} | rmse: {rmse:.2f}")

        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            shape = sfm_net(torch.ones(1,3,32,32)).view(N,3)
            le = torch.mean(shape[36:42,:],axis=0)
            re = torch.mean(shape[42:48,:],axis=0)
            pred_ipd = torch.norm(le - re).detach()
            shape = (ipd/pred_ipd) * shape
            shape = shape - shape.mean(0).unsqueeze(0)

            # focal length
            K = torch.zeros((3,3)).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # differentiable PnP pose estimation
            pose = bpnp(x2d,shape,K,ini_pose)
            pred = BPnP.batch_project(pose,shape,K)

            # apply loss
            loss = (torch.norm(pred - x2d,dim=-1)).mean()
            if iter >= 5: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if shape_gt is not None:
                d,Z,tform = util.procrustes(shape.detach().numpy(),shape_gt.detach().numpy())
                rmse = torch.norm(shape_gt - Z,dim=1).mean().detach()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {loss.item():.3f} | rmse: {rmse:.2f}")


        if torch.abs(curloss - loss) <= 0.01 or curloss < loss: break
        curloss = loss

    km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
    Xc,R,T,mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

    return shape,K,R,T
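dualoptimization is a block-coordinate scheme: a few gradient steps on the calibration block while the shape is detached, then a few steps on the shape block while the focal length is detached, repeated until the outer loss stagnates or regresses. The toy below reproduces only that structure on an assumed two-variable objective; it is a sketch of the pattern, not of the repository's actual losses.

import itertools
import torch

a = torch.tensor(0.5, requires_grad=True)   # stands in for the calibration block
b = torch.tensor(0.5, requires_grad=True)   # stands in for the shape block
opt1 = torch.optim.Adam([a], lr=1e-1)
opt2 = torch.optim.Adam([b], lr=1e-1)

def objective(a, b):
    return (a * b - 6.0) ** 2 + 0.1 * (a - b) ** 2

curloss = 1e4
for outerloop in itertools.count():
    # stage 1: update a with b frozen
    for it in range(5):
        opt1.zero_grad()
        loss = objective(a, b.detach())
        loss.backward()
        opt1.step()
    # stage 2: update b with a frozen
    for it in range(5):
        opt2.zero_grad()
        loss = objective(a.detach(), b)
        loss.backward()
        opt2.step()
    # same outer stopping rule as above: stop on stagnation or regression
    if abs(curloss - loss.item()) <= 0.01 or curloss < loss.item():
        break
    curloss = loss.item()

print(f"a={a.item():.3f}, b={b.item():.3f}, loss={loss.item():.5f}")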
Example #23
def dualoptimization(x,calib_net,sfm_net,shape_gt=None,fgt=None,M=100,N=68,mode='still',db='real'):

    if mode == 'still':
        alpha = 1
    else:
        alpha = 0.0001

    # define what weights gets optimized
    calib_net.eval()
    sfm_net.eval()
    trainfc(calib_net)
    trainfc(sfm_net)

    ptsI = x.squeeze().permute(1,0).reshape((M,N,2)).permute(0,2,1)

    # run the model
    f = calib_net(x) + 300
    betas = sfm_net(x)
    betas = betas.squeeze(0).unsqueeze(-1)
    shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)
    shape = shape - shape.mean(0).unsqueeze(0)

    opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
    opt2 = torch.optim.Adam(sfm_net.parameters(),lr=5)
    curloss = 100
    for outerloop in itertools.count():
        shape = shape.detach()
        for iter in itertools.count():

            opt1.zero_grad()
            f = calib_net(x) + 300
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # pose estimation
            km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
            _, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)
            Xc = torch.bmm(R,torch.stack(M*[shape.T])) + T.unsqueeze(2)
            shape_error = util.getShapeError(ptsI,Xc,shape,f,R,T)
            error_time = util.getTimeConsistency(shape,R,T)
            error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')

            # apply loss
            #loss = shape_error
            loss = error2d.mean() + alpha*error_time
            #loss = error2d.mean()

            # database constraint
            if db == 'biwi':
                dgt = 1000
                dpred = torch.norm(T,dim=1)
                loss = loss + 0.001*torch.abs(dgt - dpred).mean()

            if iter >= 5: break
            prv_loss = loss.item()
            loss.backward()
            opt1.step()

            # log results on console
            if shape_gt is not None:
                rmse = torch.norm(shape_gt - shape,dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | error_time: {error_time.item():.2f} | rmse: {rmse:.2f}")

        f = f.detach()
        for iter in itertools.count():
            opt2.zero_grad()

            # shape prediction
            betas = sfm_net(x)
            shape = torch.sum(betas * lm_eigenvec,1)
            shape = shape.reshape(68,3) + mu_lm
            shape = shape - shape.mean(0).unsqueeze(0)
            K = torch.zeros((3,3)).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1

            # differentiable PnP pose estimation
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            _, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI)
            error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
            Xc = torch.bmm(R,torch.stack(M*[shape.T])) + T.unsqueeze(2)
            shape_error = util.getShapeError(ptsI,Xc,shape,f,R,T)
            error_time = util.getTimeConsistency(shape,R,T)

            # apply loss
            #loss = shape_error
            loss = error2d.mean() + alpha*error_time
            #loss = error2d.mean()

            # database constraint
            if db == 'biwi':
                dgt = 1000
                dpred = torch.norm(T,dim=1)
                loss = loss + 0.001*torch.abs(dgt - dpred).mean()

            if iter >= 5: break
            loss.backward()
            opt2.step()
            prv_loss = loss.item()

            # log results on console
            if shape_gt is not None:
                rmse = torch.norm(shape_gt - shape,dim=1).mean().item()
            else:
                rmse = -1
            if fgt is not None:
                ftrue = fgt.item()
            else:
                ftrue = -1
            f_error = torch.mean(torch.abs(f-ftrue))
            print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{ftrue:.1f} | error2d: {error2d.mean().item():.3f} | error_time: {error_time.item():.2f} | rmse: {rmse:.2f}")

        if torch.abs(curloss - loss) <= 0.01 or curloss < loss: break
        curloss = loss
    return shape,K,R,T
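The BIWI branch of the dual optimization above adds a soft 'database constraint': the norm of each predicted translation is pulled toward a nominal subject distance (1000 in the dataset's units) with a small weight. A minimal sketch of that penalty on a toy translation batch; the helper name and the example values are assumptions.

import torch

def distance_prior_penalty(T, dgt=1000.0, weight=0.001):
    """Soft prior pulling each per-frame camera distance ||T|| toward dgt."""
    dpred = torch.norm(T, dim=1)                 # (M,) predicted distances
    return weight * torch.abs(dgt - dpred).mean()

T = torch.tensor([[0.0, 0.0, 950.0],
                  [0.0, 0.0, 1080.0]])           # two toy per-frame translations
print(distance_prior_penalty(T))                 # tensor(0.0650)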
Example #24
def testBIWIID(modelin=args.model,outfile=args.out,optimize=args.opt):
    # define model, dataloader, 3dmm eigenvectors, optimization method
    calib_net = CalibrationNet3(n=1)
    sfm_net = CalibrationNet3(n=199)
    if modelin != "":
        calib_path = os.path.join('model','calib_' + modelin)
        sfm_path = os.path.join('model','sfm_' + modelin)
        calib_net.load_state_dict(torch.load(calib_path))
        sfm_net.load_state_dict(torch.load(sfm_path))
    calib_net.eval()
    sfm_net.eval()

    # mean shape and eigenvectors for 3dmm
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:,2] = mu_lm[:,2]*-1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float().detach()
    sigma = torch.from_numpy(data3dmm.sigma).float().detach()
    sigma = torch.diag(sigma.squeeze())
    lm_eigenvec = torch.mm(lm_eigenvec, sigma)

    # define loader
    loader = dataloader.BIWIIDLoader()
    f_pred = []
    shape_pred = []
    error_2d = []
    error_relf = []
    error_rel3d = []
    for idx in range(len(loader)):
        batch = loader[idx]
        x_cam_gt = batch['x_cam_gt']
        fgt = batch['f_gt']
        x_img = batch['x_img']
        x_img_gt = batch['x_img_gt']
        M = x_img_gt.shape[0]
        N = 68

        ptsI = x_img.reshape((M,N,2)).permute(0,2,1)
        x = ptsI.unsqueeze(0).permute(0,2,1,3)

        # run the model
        f = calib_net(x) + 300
        betas = sfm_net(x)
        betas = betas.squeeze(0).unsqueeze(-1)
        shape = mu_lm + torch.mm(lm_eigenvec,betas).squeeze().view(N,3)

        # additional optimization on initial solution
        if optimize:
            calib_net.load_state_dict(torch.load(calib_path))
            sfm_net.load_state_dict(torch.load(sfm_path))
            calib_net.eval()
            sfm_net.eval()
            trainfc(calib_net)
            trainfc(sfm_net)

            opt1 = torch.optim.Adam(calib_net.parameters(),lr=1e-4)
            opt2 = torch.optim.Adam(sfm_net.parameters(),lr=1e-5)
            curloss = 100

            for outerloop in itertools.count():

                # camera calibration
                shape = shape.detach()
                for iter in itertools.count():
                    opt1.zero_grad()
                    f = calib_net.forward2(x) + 300
                    K = torch.zeros(3,3).float()
                    K[0,0] = f
                    K[1,1] = f
                    K[2,2] = 1

                    f_error = torch.mean(torch.abs(f - fgt))
                    #rmse = torch.norm(shape_gt - shape,dim=1).mean()

                    # differentiable PnP pose estimation
                    km,c_w,scaled_betas, alphas = util.EPnP(ptsI,shape,K)
                    Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                    error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                    error_time = util.getTimeConsistency(shape,R,T)

                    loss = error2d.mean() + 0.01*error_time

                    if iter == 5: break
                    #if iter > 10 and prev_loss < loss:
                    #    break
                    #else:
                    #    prev_loss = loss
                    loss.backward()
                    opt1.step()
                    print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} ")

                # sfm
                f = f.detach()
                for iter in itertools.count():
                    opt2.zero_grad()

                    # shape prediction
                    betas = sfm_net.forward2(x)
                    shape = torch.sum(betas * lm_eigenvec,1)
                    shape = shape.reshape(68,3) + mu_lm
                    shape = shape - shape.mean(0).unsqueeze(0)

                    K = torch.zeros((3,3)).float()
                    K[0,0] = f
                    K[1,1] = f
                    K[2,2] = 1

                    #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()
                    #rmse = torch.norm(shape_gt - shape,dim=1).mean().detach()

                    # differentiable PnP pose estimation
                    km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
                    Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)
                    error2d = util.getReprojError2(ptsI,shape,R,T,K,show=False,loss='l2')
                    error_time = util.getTimeConsistency(shape,R,T)

                    loss = error2d.mean() + 0.01*error_time
                    if iter == 5: break
                    prev_loss = loss.item()
                    loss.backward()
                    opt2.step()
                    print(f"iter: {iter} | error: {loss.item():.3f} | f/fgt: {f.item():.1f}/{fgt[0].item():.1f} | error2d: {error2d.mean().item():.3f} ")

                # closing condition for outerloop on dual objective
                if torch.abs(curloss - loss) < 0.01: break
                curloss = loss
        else:
            K = torch.zeros(3,3).float()
            K[0,0] = f
            K[1,1] = f
            K[2,2] = 1
            km,c_w,scaled_betas,alphas = util.EPnP(ptsI,shape,K)
            Xc, R, T, mask = util.optimizeGN(km,c_w,scaled_betas,alphas,shape,ptsI,K)

        # get errors
        reproj_errors2 = util.getReprojError2(ptsI,shape,R,T,K)
        rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

        reproj_error = reproj_errors2.mean()
        rel_error = rel_errors.mean()
        f_error = torch.abs(fgt - f) / fgt

        # save final prediction
        f_pred.append(f.detach().cpu().item())
        shape_pred.append(shape.detach().cpu().numpy())

        error_2d.append(reproj_error.cpu().data.item())
        error_rel3d.append(rel_error.cpu().data.item())
        error_relf.append(f_error.cpu().data.item())

        print(f" f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
        #end for

    # prepare output file
    out_shape = np.stack(shape_pred)
    out_f = np.stack(f_pred)

    matdata = {}
    matdata['shape'] = np.stack(out_shape)
    matdata['f'] = np.stack(out_f)
    matdata['error_2d'] = np.array(error_2d)
    matdata['error_rel3d'] = np.array(error_rel3d)
    matdata['error_relf'] = np.array(error_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(error_2d)}")
    print(f"MEAN seterror_rel3d: {np.mean(error_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(error_relf)}")
Example #25
def test(model, modelin=args.model,outfile=args.out,feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    if modelin != "":
        model.load_state_dict(torch.load(modelin))
    model.eval()

    # mean shape and eigenvectors for 3dmm
    M = 100
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float()
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    shape = mu_lm

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [i*100 for i in range(4,21)]
    for f_test in f_vals:
        # create dataloader
        data = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for k in range(len(data)):
            batch = data[k]
            x_cam_gt = batch['x_cam_gt']
            x_w_gt = batch['x_w_gt']
            f_gt = batch['f_gt']
            x_img = batch['x_img'].unsqueeze(0)
            x_img_gt = batch['x_img_gt']
            T_gt = batch['T_gt']
            sequence = batch['x_img'].reshape((M,N,2)).permute(0,2,1)

            all_depth.append(np.mean(T_gt[:,2]))
            all_f.append(f_gt.numpy()[0])

            one = torch.ones(batch_size,M*N,1)
            x_img_one = torch.cat([x_img,one],dim=2)

            # run the model
            out,_,_ = model(x_img_one.permute(0,2,1))
            betas = out[:,:199]
            fout = torch.relu(out[:,199])
            if torch.any(fout < 1): fout = fout+1

            # apply 3DMM model from predicted parameters
            alpha_matrix = torch.diag(betas.squeeze())
            shape_cov = torch.mm(lm_eigenvec,alpha_matrix)
            s = shape_cov.sum(1).view(68,3)
            #shape = (mu_lm + s)
            #shape = mu_lm
            #shape[:,2] = shape[:,2]*-1

            # run epnp using predicted shape and intrinsics
            K = torch.zeros((3,3))
            K[0,0] = fout
            K[1,1] = fout
            K[2,2] = 1
            K[0,2] = 0
            K[1,2] = 0
            Xc,R,T = util.EPnP(sequence,shape,K)

            # get errors
            reproj_errors2 = util.getReprojError2(sequence,shape,R,T,K)
            reproj_errors3 = util.getReprojError3(x_cam_gt,shape,R,T)
            rel_errors = util.getRelReprojError3(x_cam_gt,shape,R,T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(f_gt - fout) / f_gt

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(f"f/sequence: {f_test}/{k}  | f/fgt: {fout[0].item():.3f}/{f_gt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}")
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile,matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")
Example #26
def test(modelin=args.model,
         outfile=args.out,
         feature_transform=args.feat_trans):

    # define model, dataloader, 3dmm eigenvectors, optimization method
    #if modelin != "":
    #    model.load_state_dict(torch.load(modelin))
    #model.eval()
    #model.cuda()

    # mean shape and eigenvectors for 3dmm
    M = 100
    N = 68
    data3dmm = dataloader.SyntheticLoader()
    mu_lm = torch.from_numpy(data3dmm.mu_lm).float().detach()
    mu_lm[:, 2] = mu_lm[:, 2] * -1
    lm_eigenvec = torch.from_numpy(data3dmm.lm_eigenvec).float()
    #optimizer = torch.optim.Adam(model.parameters(),lr=1e-2)

    # sample from f testing set
    allerror_2d = []
    allerror_3d = []
    allerror_rel3d = []
    allerror_relf = []
    all_f = []
    all_depth = []

    seterror_3d = []
    seterror_rel3d = []
    seterror_relf = []
    seterror_2d = []
    f_vals = [400 + i * 100 for i in range(4)]

    # set random seed for reproducibility of test set
    np.random.seed(0)
    torch.manual_seed(0)
    for f_test in f_vals:
        f_test = 1400  # overrides the swept focal value; only this setting is actually evaluated
        # create dataloader
        loader = dataloader.TestLoader(f_test)

        error_2d = []
        error_3d = []
        error_rel3d = []
        error_relf = []
        M = 100
        N = 68
        batch_size = 1

        for j, data in enumerate(loader):
            # create a model and optimizer for it
            model2 = Model1(k=199, feature_transform=False)
            model2.apply(util.init_weights)
            model = Model1(k=1, feature_transform=False)
            model.apply(util.init_weights)
            opt1 = torch.optim.Adam(model2.parameters(), lr=1e-1)
            opt2 = torch.optim.Adam(model.parameters(), lr=1e-1)

            # load the data
            x_cam_gt = data['x_cam_gt']
            shape_gt = data['x_w_gt']
            fgt = data['f_gt']
            x_img = data['x_img']
            x_img_gt = data['x_img_gt']
            T_gt = data['T_gt']
            all_depth.append(np.mean(T_gt[:, 2]))
            all_f.append(fgt.numpy()[0])

            x_img_pts = x_img.reshape((M, N, 2)).permute(0, 2, 1)
            one = torch.ones(M * N, 1)
            x_img_one = torch.cat([x_img, one], dim=1)
            x_cam_pt = x_cam_gt.permute(0, 2, 1).reshape(M * N, 3)
            x = x_img_one.permute(1, 0)

            ptsI = x_img.reshape((M, N, 2)).permute(0, 2, 1)

            # multi objective optimization
            shape = mu_lm
            for outerloop in itertools.count():

                # calibration alg3
                shape = shape.detach()
                for iter2 in itertools.count():
                    opt2.zero_grad()

                    # focal length prediction
                    curf, _, _ = model(x.unsqueeze(0))
                    curf = curf + 300
                    K = torch.zeros((3, 3)).float()
                    K[0, 0] = curf
                    K[1, 1] = curf
                    K[2, 2] = 1

                    # RMSE between GT and predicted shape
                    rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()

                    # differentiable PnP pose estimation
                    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                     alphas, shape, ptsI, K)
                    error2d = util.getReprojError2(ptsI,
                                                   shape,
                                                   R,
                                                   T,
                                                   K,
                                                   show=False,
                                                   loss='l2')
                    loss = error2d.mean()
                    if iter2 > 20 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt2.step()
                    print(
                        f"iter: {iter2} | error: {loss.item():.3f} | f/fgt: {curf.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}"
                    )

                # sfm alg2
                curf = curf.detach()
                for iter1 in itertools.count():
                    opt1.zero_grad()

                    # shape prediction
                    betas, _, _ = model2(x.unsqueeze(0))
                    shape = torch.sum(betas * lm_eigenvec, 1)
                    shape = shape.reshape(68, 3) + mu_lm
                    K = torch.zeros((3, 3)).float()
                    K[0, 0] = curf
                    K[1, 1] = curf
                    K[2, 2] = 1

                    # RMSE between GT and predicted shape
                    rmse = torch.norm(shape_gt - shape, dim=1).mean().detach()

                    # differentiable PnP pose estimation
                    km, c_w, scaled_betas, alphas = util.EPnP(ptsI, shape, K)
                    Xc, R, T, mask = util.optimizeGN(km, c_w, scaled_betas,
                                                     alphas, shape, ptsI, K)
                    error2d = util.getReprojError2(ptsI,
                                                   shape,
                                                   R,
                                                   T,
                                                   K,
                                                   show=False,
                                                   loss='l2')
                    loss = error2d.mean()
                    if iter1 > 20 and prev_loss < loss:
                        break
                    else:
                        prev_loss = loss
                    loss.backward()
                    opt1.step()
                    print(
                        f"iter: {iter1} | error: {loss.item():.3f} | f/fgt: {curf.item():.1f}/{fgt[0].item():.1f} | rmse: {rmse.item():.2f}"
                    )

                # closing condition for outerloop on dual objective
                if outerloop == 4: break

            f = curf
            # get errors
            reproj_errors2 = util.getReprojError2(ptsI, shape, R, T, K)
            reproj_errors3 = util.getReprojError3(x_cam_gt, shape, R, T)
            rel_errors = util.getRelReprojError3(x_cam_gt, shape, R, T)

            reproj_error = reproj_errors2.mean()
            reconstruction_error = reproj_errors3.mean()
            rel_error = rel_errors.mean()
            f_error = torch.abs(fgt - f) / fgt

            allerror_2d.append(reproj_error.data.numpy())
            allerror_3d.append(reconstruction_error.data.numpy())
            allerror_rel3d.append(rel_error.data.numpy())

            error_2d.append(reproj_error.cpu().data.item())
            error_3d.append(reconstruction_error.cpu().data.item())
            error_rel3d.append(rel_error.cpu().data.item())
            error_relf.append(f_error.cpu().data.item())

            print(
                f"f/sequence: {f_test}/{j}  | f/fgt: {f[0].item():.3f}/{fgt.item():.3f} |  f_error_rel: {f_error.item():.4f}  | rmse: {reconstruction_error.item():.4f}  | rel rmse: {rel_error.item():.4f}    | 2d error: {reproj_error.item():.4f}"
            )
            #end for

        avg_2d = np.mean(error_2d)
        avg_rel3d = np.mean(error_rel3d)
        avg_3d = np.mean(error_3d)
        avg_relf = np.mean(error_relf)

        seterror_2d.append(avg_2d)
        seterror_3d.append(avg_3d)
        seterror_rel3d.append(avg_rel3d)
        seterror_relf.append(avg_relf)
        #end for
        break  # stop after the first (overridden) focal-length setting

    all_f = np.stack(all_f).flatten()
    all_d = np.stack(all_depth).flatten()
    allerror_2d = np.stack(allerror_2d).flatten()
    allerror_3d = np.stack(allerror_3d).flatten()
    allerror_rel3d = np.stack(allerror_rel3d).flatten()

    matdata = {}
    matdata['fvals'] = np.array(f_vals)
    matdata['all_f'] = np.array(all_f)
    matdata['all_d'] = np.array(all_depth)
    matdata['error_2d'] = allerror_2d
    matdata['error_3d'] = allerror_3d
    matdata['error_rel3d'] = allerror_rel3d
    matdata['seterror_2d'] = np.array(seterror_2d)
    matdata['seterror_3d'] = np.array(seterror_3d)
    matdata['seterror_rel3d'] = np.array(seterror_rel3d)
    matdata['seterror_relf'] = np.array(seterror_relf)
    scipy.io.savemat(outfile, matdata)

    print(f"MEAN seterror_2d: {np.mean(seterror_2d)}")
    print(f"MEAN seterror_3d: {np.mean(seterror_3d)}")
    print(f"MEAN seterror_rel3d: {np.mean(seterror_rel3d)}")
    print(f"MEAN seterror_relf: {np.mean(seterror_relf)}")