Code example #1
def unrealistic_pose_loss(ske_fs):
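    # Penalize relative rotations between paired joints (decoded from the skeleton
    # features via batch_rodrigues) that exceed plausible human joint limits;
    # accumulates a penalty per sample in the batch.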
    ske_fs = U.unscale_features(ske_fs, ske_scales)
    Rs = batch_rodrigues(ske_fs.reshape(-1, 9)[:, :3]).reshape(-1, pnum, 3, 3)
    loss = 0
    rR = Rs[:, 13, :].matmul(Rs[:, 11, :].permute(0, 2, 1))
    loss += unreal_loss(rR[:, 2, 0])
    rR = Rs[:, 14, :].matmul(Rs[:, 12, :].permute(0, 2, 1))
    loss += unreal_loss(-rR[:, 2, 0])
    rR = Rs[:, 4, :].matmul(Rs[:, 1, :].permute(0, 2, 1))
    loss += unreal_loss(-rR[:, 2, 1])
    loss += unreal_loss(rR[:, 0, 1] - math.sin(3. / 180. * np.pi))
    # loss+=unreal_loss(-(rR[:,0,1]+math.sin(3./180.*np.pi)))
    rR = Rs[:, 5, :].matmul(Rs[:, 2, :].permute(0, 2, 1))
    loss += unreal_loss(-rR[:, 2, 1])
    # loss+=unreal_loss(rR[:,0,1]-math.sin(3./180.*np.pi))
    loss += unreal_loss(-(rR[:, 0, 1] + math.sin(3. / 180. * np.pi)))
    rR = Rs[:, 11, :].matmul(Rs[:, 6, :].permute(0, 2, 1))
    loss += unreal_loss(rR[:, 2, 0] - 0.1)
    loss += unreal_loss(-(rR[:, 0, 0] + math.sin(10. / 180. * np.pi)))
    rR = Rs[:, 12, :].matmul(Rs[:, 7, :].permute(0, 2, 1))
    loss += unreal_loss(-(rR[:, 2, 0] + 0.1))
    loss += unreal_loss(-(rR[:, 0, 0] + math.sin(10. / 180. * np.pi)))
    rR = Rs[:, 1, :].matmul(Rs[:, 0, :].permute(0, 2, 1))
    loss += unreal_loss(rR[:, 2, 0] - 0.7)
    loss += unreal_loss(rR[:, 2, 1] - math.sin(8. / 180. * np.pi))
    loss += unreal_loss(-(rR[:, 0, 2] + math.sin(65. / 180. * np.pi)))
    rR = Rs[:, 2, :].matmul(Rs[:, 0, :].permute(0, 2, 1))
    loss += unreal_loss(-(rR[:, 2, 0] + 0.7))
    loss += unreal_loss(rR[:, 2, 1] - math.sin(8. / 180. * np.pi))
    loss += unreal_loss(rR[:, 0, 2] - math.sin(65. / 180. * np.pi))
    rR = Rs[:, 0, :].matmul(Rs[:, 3, :].permute(0, 2, 1))
    loss += unreal_loss(rR[:, 2, 1] - math.sin(8. / 180. * np.pi))
    return loss
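The helper unreal_loss is not included in these snippets. Judging from how it is called here (a per-sample value that should stay non-positive), a minimal sketch could be a one-sided quadratic hinge; the project's actual definition may differ (the variant called in code example #2 clearly takes more arguments).

import torch

def unreal_loss(x):
    # Hypothetical stand-in: zero when the constraint value x is <= 0,
    # quadratic penalty when it becomes positive; keeps the batch dimension
    # so the caller can still .sum() the result per sample.
    return torch.pow(torch.relu(x), 2)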
Code example #2
def unrealistic_pose_loss(ske_fs):
    ske_fs = U.unscale_features(ske_fs, ske_scales)
    data_size = ske_fs.shape[0]
    Rs = batch_rodrigues(ske_fs.reshape(-1, 9)[:, :3]).reshape(-1, pnum, 3, 3)
    loss = 0
    rR = Rs[:, 9, :].matmul(Rs[:, 13, :].permute(0, 2, 1))
    loss += unreal_loss(rR, 0, 1, 1, -10, 'maxer')
    rR = Rs[:, 10, :].matmul(Rs[:, 14, :].permute(0, 2, 1))
    loss += unreal_loss(rR, 0, 1, 1, -10, 'maxer')
    rR = Rs[:, 2, :].matmul(Rs[:, 4, :].permute(0, 2, 1))
    loss += unreal_loss(rR, 0, 1, 1, 0, 'miner')
    rR = Rs[:, 3, :].matmul(Rs[:, 5, :].permute(0, 2, 1))
    loss += unreal_loss(rR, 0, 1, 1, 0, 'miner')
    return loss
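Both examples above rely on batch_rodrigues to turn per-joint axis-angle vectors into rotation matrices. Assuming it follows the standard Rodrigues formula (as in common SMPL-style PyTorch code), a self-contained sketch looks like this:

import torch

def batch_rodrigues(axisang, eps=1e-8):
    # Axis-angle vectors (N, 3) -> rotation matrices (N, 3, 3) via
    # R = I + sin(a) K + (1 - cos(a)) K^2, with a the rotation angle and
    # K the skew-symmetric matrix of the unit rotation axis.
    angle = axisang.norm(dim=1, keepdim=True).clamp(min=eps)   # (N, 1)
    axis = axisang / angle                                     # (N, 3)
    x, y, z = axis[:, 0], axis[:, 1], axis[:, 2]
    zero = torch.zeros_like(x)
    K = torch.stack([zero, -z, y,
                     z, zero, -x,
                     -y, x, zero], dim=1).reshape(-1, 3, 3)
    sin_a = torch.sin(angle).unsqueeze(-1)                     # (N, 1, 1)
    cos_a = torch.cos(angle).unsqueeze(-1)
    eye = torch.eye(3, dtype=axisang.dtype, device=axisang.device).expand_as(K)
    return eye + sin_a * K + (1. - cos_a) * K.bmm(K)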
Code example #3
def opt_rep_to_points(iss, ips, tarps, Rs, Ts, initRs, initTs, r_ps_w, r_ss_w,
                      unreal_pose_w):
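    # Refine shape (ss), pose (ps) and translation (Ts) with Adam so the reconstructed
    # vertices match the detached per-vertex targets (tarps); rotations (Rs, initRs)
    # stay fixed, and shape/pose/unrealistic-pose terms regularize the fit.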
    ss = iss.new_zeros(iss.shape)
    ps = ips.new_zeros(ips.shape)
    tarps = tarps.detach()
    Rs = Rs.detach()
    Ts = Ts.detach().clone()
    ss = ss.copy_(iss.detach())
    ps = ps.copy_(ips.detach())
    ss.requires_grad = True
    ps.requires_grad = True
    Ts.requires_grad = True
    data_size = Ts.shape[0]
    optimizer = optim.Adam([ss, ps, Ts], lr=0.004)
    time = 0
    while time < 1000:
        regular_shape_loss = torch.pow(ss, 2).sum()
        regular_pose_loss = pose_prior(ps).sum()
        loss = r_ps_w * regular_pose_loss + r_ss_w * regular_shape_loss
        (ske_pfs, pfs) = bodyrep(ss, ps)
        if unreal_pose_w > 0.:
            unreal_pose_loss = unrealistic_pose_loss(ske_pfs).sum()
            loss += unreal_pose_w * unreal_pose_loss
        rec_points = convert_f_points(U.unscale_features(pfs, scales)).permute(
            0, 2, 1).reshape(-1, 12500)
        rec_points = RT_points_Tensor(rec_points, Rs, initRs, Ts + initTs)
        corre_loss = (rec_points - tarps).reshape(-1, 3,
                                                  12500).norm(2,
                                                              1).mean(1).sum()
        loss += corre_loss * 1000.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if time % 100 == 0:
            out_info = 'inner %d opt %.4f, p2p:%.4f, regu shape:%.4f, regu pose:%.4f' % (
                time, loss.item() / data_size, corre_loss.item() / data_size,
                regular_shape_loss.item() / data_size,
                regular_pose_loss.item() / data_size)
            if unreal_pose_w > 0.:
                out_info += ', unreal:{:.6f}'.format(unreal_pose_loss.item() /
                                                     data_size)
            out_info += ', Rangle:{:.4f}, Ts:{:.4f}'.format(
                Rs.norm(2, 1).mean().item() / 3.1415926 * 180.0,
                Ts.norm(2, 1).mean().item())
            print(out_info)
        time += 1
    return ss, ps, Ts
Code example #4
 Ts = torch.zeros((data_size, 3)).to(device)
 ss.requires_grad = True
 ps.requires_grad = True
 Rs.requires_grad = True
 Ts.requires_grad = True
 optimizer = optim.Adam([ss, ps, Rs, Ts], lr=0.04)
 scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                            milestones=[800, 1600, 2100],
                                            gamma=0.4)
 rec_loss = (torch.ones((1)) * 100.0).to(device)
 times = 0
 print('start optimize {} to {} meshes...'.format(start_id, end_id))
 while rec_loss.item() > thred and times < max_iters:
     (r_ps_w, r_ss_w) = get_weight(times)
     (ske_pfs, pfs) = bodyrep(ss, ps)
     rec_points = convert_f_points(U.unscale_features(
         pfs, bodyrep.vs_scale)).permute(0, 2, 1).reshape(-1, vnum)
     rec_points = U.RT_points_Tensor(rec_points, Rs, Ts)
     rec_loss = U.GeometryEqualLoss(rec_points, tar_points)
     regular_shape_loss = torch.pow(ss, 2).mean()
     regular_pose_loss = torch.pow(ps, 2).mean()
     loss = rec_weight * rec_loss + r_ss_w * regular_shape_loss + r_ps_w * regular_pose_loss
     optimizer.zero_grad()
     loss.backward()
     optimizer.step()
     scheduler.step()
     if times % log_internal == 0:
         print('{}th total mean loss:{:.6f}'.format(times, loss.item()))
         print('rec_loss mean:{:.6f}'.format(rec_loss.item()))
         print('regular_shape_loss:{:.6f}, regular_pose_loss:{:.6f}'.format(
             regular_shape_loss.item(), regular_pose_loss.item()))
         print('Rangle mean:{:.6f}, Ts mean:{:.6f}'.format(
             Rs.norm(2, 1).mean().item() / 3.1415926 * 180.0,
             Ts.norm(2, 1).mean().item()))
     times += 1
Code example #5
def opt_rep_to_surface(ss, ps, tar_pt_points, tar_pt_norms, tar_pt_vnums, Rs,
                       Ts, initRs, initTs, data_size, r_ps_w, r_ss_w,
                       unreal_pose_w, icp_thread, icp_angle_thread):
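    # ICP-style refinement against the target scan: rebuild nearest-neighbour
    # correspondences every iteration, filter them by distance (icp_thread) and
    # normal angle (icp_angle_thread), and minimize a point-to-plane term plus priors.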
    global tri_fs, vertex_index, face_index, checks_not_hands
    ss = ss.detach().clone()
    ps = ps.detach().clone()
    Rs = Rs.detach().clone()
    Ts = Ts.detach().clone()
    ss.requires_grad = True
    ps.requires_grad = True
    Ts.requires_grad = True
    Rs.requires_grad = True
    optimizer = optim.Adam([ss, ps, Rs, Ts], lr=0.001)
    time = 0
    while time < 600:
        regular_shape_loss = torch.pow(ss, 2).sum()
        regular_pose_loss = pose_prior(ps).sum()
        loss = r_ps_w * regular_pose_loss + r_ss_w * regular_shape_loss
        (ske_pfs, pfs) = bodyrep(ss, ps)
        if unreal_pose_w > 0.:
            unreal_pose_loss = unrealistic_pose_loss(ske_pfs).sum()
            loss += unreal_pose_w * unreal_pose_loss
        rec_points = convert_f_points(U.unscale_features(pfs, scales)).permute(
            0, 2, 1).reshape(-1, 12500)
        rec_points = RT_points_Tensor(rec_points, Rs, initRs, Ts + initTs)

        _, knn_indexs = batch_knn_gpu_pytorch(
            tar_pt_points.reshape(data_size, 3, tar_pt_points.shape[-1]),
            rec_points.reshape(data_size, 3, 12500).permute(1, 0,
                                                            2).reshape(3, -1),
            tar_pt_vnums, np.array([12500] * data_size, np.int32), 1)
        knn_indexs = knn_indexs.reshape(data_size, 12500)
        knn_vecs = torch.zeros(data_size * 3, 12500, device=device)
        knn_vecs[
            0::3, :] = rec_points[0::3, :] - tar_pt_points[0::3, :].gather(
                1, knn_indexs)
        knn_vecs[
            1::3, :] = rec_points[1::3, :] - tar_pt_points[1::3, :].gather(
                1, knn_indexs)
        knn_vecs[
            2::3, :] = rec_points[2::3, :] - tar_pt_points[2::3, :].gather(
                1, knn_indexs)
        knn_dists = knn_vecs.reshape(data_size, 3, 12500).norm(p=None, dim=1)
        rec_norms = U.compute_vnorms(
            rec_points.reshape(data_size, 3, 12500).permute(0, 2, 1), tri_fs,
            vertex_index, face_index).permute(0, 2,
                                              1).reshape(3 * data_size, 12500)
        tar_corre_norms = torch.zeros(data_size * 3, 12500, device=device)
        check1 = (knn_dists < icp_thread).to(
            torch.float) * checks_not_hands.reshape(1, -1)
        tar_corre_norms[0::3, :] = tar_pt_norms[0::3, :].gather(1, knn_indexs)
        tar_corre_norms[1::3, :] = tar_pt_norms[1::3, :].gather(1, knn_indexs)
        tar_corre_norms[2::3, :] = tar_pt_norms[2::3, :].gather(1, knn_indexs)
        check2 = ((rec_norms * tar_corre_norms).reshape(
            data_size, 3, 12500).sum(1) > math.cos(
                icp_angle_thread * np.pi / 180.)).to(torch.float)
        valid_pair_index = check1 * check2
        # point-to-plane distances
        knn_dists = torch.abs(
            (knn_vecs * tar_corre_norms).reshape(data_size, 3, 12500).sum(1))
        # knn_dists could optionally be wrapped in a Geman-McClure robust loss here
        if valid_pair_index.sum() > 0:
            rec_p2p_loss = ((knn_dists * valid_pair_index).sum(1) /
                            valid_pair_index.sum(1)).sum()
        else:
            rec_p2p_loss = (knn_dists * valid_pair_index).sum()
        loss = loss + 1000. * rec_p2p_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if time % 50 == 0:
            out_info = 'final %d opt %.4f, p2n:%.4f, regu shape:%.4f, regu pose:%.4f' % (
                time, loss.item() / data_size, rec_p2p_loss.item() / data_size,
                regular_shape_loss.item() / data_size,
                regular_pose_loss.item() / data_size)
            if unreal_pose_w > 0.:
                out_info += ', unreal:{:.6f}'.format(unreal_pose_loss.item() /
                                                     data_size)
            out_info += ', Rangle:{:.4f}, Ts:{:.4f}'.format(
                Rs.norm(2, 1).mean().item() / 3.1415926 * 180.0,
                Ts.norm(2, 1).mean().item())
            print(out_info)
        time += 1
    return ss, ps, Rs, Ts
Code example #6
    om.read_trimesh('../../models/bodyTem/template.obj'), device)
#FARM arap landmarks
select_landmarks = torch.from_numpy(
    np.array([11556, 8792, 6265, 8437, 5732, 4727, 2687, 739, 2645, 753],
             np.int64)).to(device)

obj_files = [
    os.path.join('Faust_test_scan', f + '.ply')
    for f in ['test_scan_050', 'test_scan_103']
]
save_root = 'bodyrep_rec'
if not os.path.isdir(save_root):
    os.makedirs(save_root)
ref_pos = convert_f_points(
    U.unscale_features(
        bodyrep(torch.zeros(1, shapenum, device=device),
                torch.zeros(1, posenum, device=device))[1],
        scales)).permute(0, 2, 1).reshape(3, -1)
ref_mean_pos = ref_pos.mean(1)
ref_mean_pos = ref_mean_pos.cpu().numpy()

with open('body_finger_ear_vids.txt', 'r') as ff:
    body_hids = np.array([int(v) for v in ff.read().split()], np.int32)
    checks_not_hands = np.ones((12500), np.float32)
    checks_not_hands[body_hids] = 0.
    assert (checks_not_hands.sum() == 12500 - body_hids.size)
    checks_not_hands = torch.from_numpy(checks_not_hands).to(device)

weights = list(
    zip([20, 10, 1, 0, 0, 0], [20, 10, 1, 0, 0, 0],
        [1000, 2000, 400, 100., 10., 0.], [200, 200, 600, 700, 800, 850],
        [0.15, 0.18, 0.18, 0.20, 0.20, 0.20], [-1, -1, 40, 15, 5, 2.],
Code example #7
def optimize_for_deepcut_j2ds(folder, js, log_internal, max_iters):
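    # Fit the body model to 2D joint detections (js) in batches of frames: first solve
    # the global rotation/translation from four anchor joints, then run the full
    # optimization from several initial rotations and initial poses, keeping the best
    # result per frame.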
    fsave_root = os.path.join(save_root, folder + '_ms')
    # fsave_root=save_root
    if not os.path.isdir(fsave_root):
        os.makedirs(fsave_root)
    batch_size = 350
    start_id = 0
    fnum = js.shape[0]
    RTs = np.zeros((0, 6), dtype=np.float32)
    rec_shapes = np.zeros((0, shapenum), dtype=np.float32)
    rec_poses = np.zeros((0, posenum), dtype=np.float32)
    rec_apts = np.zeros((0, 6890), dtype=np.float32)
    print('start optimize ' + folder + ' data, with interval %d...' % INTERVAL)
    while start_id < fnum:
        end_id = start_id + batch_size
        if end_id > fnum:
            end_id = fnum
        print('{} to {} frames...'.format(start_id, end_id))
        tar_js = torch.from_numpy(js[start_id:end_id, :, :]).to(device)
        data_size = end_id - start_id
        ss = torch.zeros((data_size, shapenum)).to(device)

        # ps=torch.from_numpy(init_p.reshape(1,-1).repeat(data_size,axis=0)).to(device)
        ps = torch.zeros((data_size, posenum)).to(device)
        # Rs=torch.zeros((data_size,3)).to(device)
        # Rs.data.uniform_(-0.01,0.01)
        # make initial stand in front of camera
        Rs = torch.zeros((data_size, 3))
        Rs = Rs.copy_(
            torch.from_numpy(np.array([np.pi, 0, 0],
                                      np.float32)).reshape(1, 3)).to(device)

        Ts = torch.zeros((data_size, 3)).to(device)
        initial_depths = init_trans(tar_js)
        Ts[:, 2] = initial_depths
        Rs.requires_grad = True
        Ts.requires_grad = True
        optimizerInitial = optim.Adam([Rs, Ts], lr=0.04)
        schedulerInitial = optim.lr_scheduler.MultiStepLR(optimizerInitial,
                                                          milestones=[200],
                                                          gamma=0.4)
        times = 0
        print('start initial 0th cam external params...')
        (_, pfs) = bodyExtRep(ss, ps)
        initial_points = convert_f_points(U.unscale_features(
            pfs, scales)).permute(0, 2, 1).reshape(-1, 6890)
        while times < 400:
            rec_points = U.RT_points_Tensor(initial_points, Rs, Ts)
            body_joints = rec_points.matmul(j_regressor)
            extract_bjoints = body_joints[:, [9, 3, 8, 2]]
            projs = ProjectPointRadial(
                extract_bjoints.reshape(data_size, 3,
                                        4).permute(0, 2, 1).reshape(-1, 3),
                cam_fs, cam_cs, cam_ks, cam_ps)
            k2d_loss = (projs.reshape(data_size, 4, 2) -
                        tar_js[:, [9, 3, 8, 2], :2]).norm(
                            p=2, dim=-1).mean(1).sum()
            depth_regu_loss = torch.pow(Ts[:, 2] - initial_depths, 2).sum()
            loss = k2d_loss + 100. * depth_regu_loss
            optimizerInitial.zero_grad()
            loss.backward()
            optimizerInitial.step()
            schedulerInitial.step()
            if times % log_internal == 0:
                print(
                    '{}th total mean loss:{:.6f}\nk2d_loss mean:{:.6f},depth_regu_loss:{:.6f}'
                    .format(times,
                            loss.item() / data_size,
                            k2d_loss.item() / data_size,
                            depth_regu_loss.item() / data_size))
                print('Rangle mean:{:.6f}, Ts mean:{:.6f}'.format(
                    Rs.norm(2, 1).mean().item() / 3.1415926 * 180.0,
                    Ts.norm(2, 1).mean().item()))
            times += 1
        final_k2d_loss = torch.ones((data_size, ), device=device) * 10000.
        final_Ts = torch.zeros_like(Ts, device=device)
        final_Rs = torch.zeros_like(Rs, device=device)
        final_oriRs = torch.zeros_like(Rs, device=device)
        final_ss = torch.zeros_like(ss, device=device)
        final_ps = torch.zeros_like(ps, device=device)
        # offset_axisangle=[np.array([0.,0.,0.]),np.array([0.,np.pi,0.]),np.array([np.pi,0.,0.])]
        offset_axisangle = [np.array([0., 0., 0.]), np.array([0., np.pi, 0.])]
        for i, offset in enumerate(offset_axisangle):
            ori_rRs = reverse_rotation(
                Rs.detach().cpu().numpy().astype(np.float64), offset)
            ori_rRs = torch.from_numpy(ori_rRs.astype(np.float32)).to(device)
            ori_Ts = torch.zeros((data_size, 3)).to(device)
            ori_Ts = ori_Ts.copy_(Ts.detach())
            if i > 0:
                ori_rRs.requires_grad = True
                ori_Ts.requires_grad = True
                optimizerInitial = optim.Adam([ori_rRs, ori_Ts], lr=0.04)
                schedulerInitial = optim.lr_scheduler.MultiStepLR(
                    optimizerInitial, milestones=[200], gamma=0.4)
                times = 0
                print('start initial %dth cam external params...' % i)
                while times < 400:
                    rec_points = U.RT_points_Tensor(initial_points, ori_rRs,
                                                    ori_Ts)
                    body_joints = rec_points.matmul(j_regressor)
                    extract_bjoints = body_joints[:, [9, 3, 8, 2]]
                    projs = ProjectPointRadial(
                        extract_bjoints.reshape(data_size, 3,
                                                4).permute(0, 2,
                                                           1).reshape(-1, 3),
                        cam_fs, cam_cs, cam_ks, cam_ps)
                    k2d_loss = (projs.reshape(data_size, 4, 2) -
                                tar_js[:, [9, 3, 8, 2], :2]).norm(
                                    p=2, dim=-1).mean(1).sum()
                    depth_regu_loss = torch.pow(ori_Ts[:, 2] - Ts[:, 2],
                                                2).sum()
                    loss = k2d_loss + 100. * depth_regu_loss
                    optimizerInitial.zero_grad()
                    loss.backward()
                    optimizerInitial.step()
                    schedulerInitial.step()
                    if times % log_internal == 0:
                        print(
                            '{}th total mean loss:{:.6f}\nk2d_loss mean:{:.6f},depth_regu_loss:{:.6f}'
                            .format(times,
                                    loss.item() / data_size,
                                    k2d_loss.item() / data_size,
                                    depth_regu_loss.item() / data_size))
                        print('Rangle mean:{:.6f}, Ts mean:{:.6f}'.format(
                            ori_rRs.norm(2, 1).mean().item() / 3.1415926 *
                            180.0,
                            ori_Ts.norm(2, 1).mean().item()))
                    times += 1
                ori_rRs = ori_rRs.detach()
                ori_Ts = ori_Ts.detach()
            # for pose_ind,initp in enumerate([init_p0,init_p1]):
            for pose_ind, initp in enumerate([init_p0, init_p1]):
                print(
                    'start optimize with pose {:d} {:d}th initial rotation...'.
                    format(pose_ind, i))
                rRs, rTs, rss, rps, record_loss = optimize_with_initp(
                    ori_rRs, ori_Ts, initp, ss, tar_js, data_size, max_iters,
                    log_internal)
                # re_k2d_loss=100.*re_k2d_loss+re_pose_loss
                temp_check = (final_k2d_loss > record_loss)
                final_Rs[temp_check, :] = rRs[temp_check, :]
                final_oriRs[temp_check, :] = ori_rRs[temp_check, :]
                final_Ts[temp_check, :] = rTs[temp_check, :]
                final_ss[temp_check, :] = rss[temp_check, :]
                final_ps[temp_check, :] = rps[temp_check, :]
                final_k2d_loss[temp_check] = record_loss[temp_check]

                # (ske_sfs,sfs),(ske_pfs,pfs)=sp_vae.decoder(rss,rps)
                # rec_points=convert_f_points(U.unscale_features(pfs,scales)).permute(0,2,1).reshape(-1,6890)
                # rec_points=RT_points_Tensor(rec_points,rRs,ori_rRs,rTs)
                # temp_save_root=os.path.join(save_root,folder+'_ms{:d}_{:d}'.format(pose_ind,i))
                # if not os.path.isdir(temp_save_root):
                # 	os.mkdir(temp_save_root)
                # snames=[str(name) for name in np.arange(start_id,end_id)*INTERVAL]
                # D.save_obj_files(rec_points.detach().cpu().numpy().T,ref_tris,temp_save_root,[name+'.obj' for name in snames])
        (ske_pfs, pfs) = bodyExtRep(final_ss, final_ps)
        rec_points = convert_f_points(U.unscale_features(pfs, scales)).permute(
            0, 2, 1).reshape(-1, 6890)
        rec_points = RT_points_Tensor(rec_points, final_Rs, final_oriRs,
                                      final_Ts)
        RTs = np.concatenate((RTs, torch.cat(
            (final_Rs, final_Ts), 1).detach().cpu().numpy()), 0)
        rec_shapes = np.concatenate(
            (rec_shapes, final_ss.detach().cpu().numpy()), 0)
        rec_poses = np.concatenate(
            (rec_poses, final_ps.detach().cpu().numpy()), 0)
        rec_apts = np.concatenate(
            (rec_apts, rec_points.detach().cpu().numpy()), 0)
        snames = [str(name) for name in np.arange(start_id, end_id) * INTERVAL]
        U.save_obj_files(rec_points.detach().cpu().numpy().T,
                         convert_f_points.face_index.cpu().numpy(), fsave_root,
                         [name + '.obj' for name in snames])
        start_id = end_id
    with open(os.path.join(fsave_root, 'rec_data.pkl'), 'wb') as ff:
        pickle.dump(
            {
                'RTs': RTs,
                'rec_shapes': rec_shapes,
                'rec_poses': rec_poses,
                'rec_points': rec_apts,
                'folder': folder,
                'frames': np.arange(fnum) * INTERVAL
            }, ff)
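reverse_rotation is used above to combine each estimated global rotation with a fixed axis-angle offset (e.g. a flip about the y axis) before re-optimizing. Its exact convention is not shown; one plausible reading, sketched here with SciPy, is:

import numpy as np
from scipy.spatial.transform import Rotation

def reverse_rotation(axisangles, offset):
    # Hypothetical sketch: compose the offset rotation with each per-frame
    # global rotation and return the results as axis-angle vectors again.
    # The composition order (offset applied first or last) is an assumption.
    r_offset = Rotation.from_rotvec(offset)
    r_all = Rotation.from_rotvec(axisangles)
    return (r_offset * r_all).as_rotvec()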
Code example #8
def optimize_with_initp(ori_rRs, ori_Ts, initp, ss, tar_js, data_size,
                        max_iters, log_internal):
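    # One fitting run from the given initial rotation (ori_rRs), translation (ori_Ts)
    # and pose (initp): minimize the weighted 2D reprojection error plus pose, shape
    # and rotation priors, and return the per-frame keypoint loss (record_loss) so the
    # caller can keep the best of several runs.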
    rTs = torch.zeros((data_size, 3)).to(device)
    rTs = rTs.copy_(ori_Ts.detach())
    rRs = torch.zeros((data_size, 3)).to(device)
    rRs.requires_grad = True
    rTs.requires_grad = True
    rss = torch.zeros((data_size, shapenum)).to(device)
    rps = torch.from_numpy(initp.reshape(1, -1).repeat(data_size,
                                                       axis=0)).to(device)
    # rps=torch.zeros((data_size,posenum)).to(device)
    rss.copy_(ss.detach())
    # rps.copy_(ps.detach())
    rss.requires_grad = True
    rps.requires_grad = True
    re_optimizer = optim.Adam([rss, rps, rRs, rTs], lr=0.01)
    re_scheduler = optim.lr_scheduler.MultiStepLR(re_optimizer,
                                                  milestones=[400, 600, 800],
                                                  gamma=0.4)
    times = 0
    while times < max_iters:
        (js_w, r_ps_w, r_ss_w, r_rot_w, gmc) = get_weight(times)
        # record_pose_loss=torch.pow(rps,2).mean(1)
        record_pose_loss = pose_prior(rps)
        regular_pose_loss = record_pose_loss.sum()
        record_shape_loss = torch.pow(rss, 2).mean(1)
        regular_shape_loss = record_shape_loss.sum()
        record_rot_loss = rRs.norm(2, 1)
        regular_rot_loss = record_rot_loss.sum()
        loss = max(r_ps_w, 0) * regular_pose_loss + r_ss_w * regular_shape_loss
        # record_loss=max(r_ps_w,0)*record_pose_loss+r_ss_w*record_shape_loss
        if r_rot_w > 0.:
            loss = loss + r_rot_w * regular_rot_loss
            # record_loss=record_loss+r_rot_w*record_rot_loss

        (ske_pfs, pfs) = bodyExtRep(rss, rps)
        if unreal_pose_w > 0.:
            record_unreal_loss = unrealistic_pose_loss(ske_pfs)
            record_loss = unreal_pose_w * record_unreal_loss
            unreal_loss = record_unreal_loss.sum()
            loss = loss + unreal_pose_w * unreal_loss
        else:
            unreal_loss = torch.zeros(1, device=device)

        rec_points = convert_f_points(U.unscale_features(pfs, scales)).permute(
            0, 2, 1).reshape(-1, 6890)
        rec_points = RT_points_Tensor(rec_points, rRs, ori_rRs, rTs)
        extract_bjoints = rec_points.matmul(j_regressor)
        projs = ProjectPointRadial(
            extract_bjoints.reshape(data_size, 3,
                                    14).permute(0, 2, 1).reshape(-1, 3),
            cam_fs, cam_cs, cam_ks, cam_ps)
        projs = projs.reshape(data_size, 14, 2)
        record_k2d_loss0 = (projs - tar_js[:, :, :2]).norm(p=2, dim=-1)
        if gmc > 0.:
            record_k2d_loss = (U.Geman_McClure_Loss(record_k2d_loss0, gmc) *
                               tar_js[:, :, 2]).mean(1)
        else:
            record_k2d_loss = record_k2d_loss0.mean(1)
        # record_k2d_loss=(record_k2d_loss0*tar_js[:,:,2]).mean(1)
        k2d_loss = record_k2d_loss.sum()

        loss = js_w * k2d_loss + loss
        # record_loss=record_loss+js_w*record_k2d_loss
        record_loss = record_k2d_loss
        re_optimizer.zero_grad()
        loss.backward()
        if r_rot_w < 0.:
            rRs.grad.zero_()
        if r_ps_w < 0.:
            rps.grad.zero_()
        re_optimizer.step()
        re_scheduler.step()
        # print('{:.6f}'.format(rec_loss.item()))
        if times % log_internal == 0:
            print(
                '{}th total mean loss:{:.6f}\nk2d_loss mean:{:.6f},regular_shape_loss:{:.6f}, regular_pose_loss:{:.6f}, unreal loss:{:.6f}'
                .format(times,
                        loss.item() / data_size,
                        k2d_loss.item() / data_size,
                        regular_shape_loss.item() / data_size,
                        regular_pose_loss.item() / data_size,
                        unreal_loss.item() / data_size))
            print('Rangle mean:{:.6f}, Ts mean:{:.6f}'.format(
                regular_rot_loss.item() / 3.1415926 * 180.0 / data_size,
                rTs.norm(2, 1).mean().item()))
        times += 1
    return rRs, rTs, rss, rps, record_loss
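U.Geman_McClure_Loss (applied to the 2D keypoint residuals above when gmc > 0) is not shown here; assuming it is the standard Geman-McClure robust penalty, a minimal sketch is:

import torch

def geman_mcclure_loss(residual, sigma):
    # Hypothetical stand-in: rho(r) = sigma^2 * r^2 / (sigma^2 + r^2).
    # Behaves like r^2 for small residuals and saturates near sigma^2 for
    # outliers, so bad 2D detections stop dominating the fit.
    sq = residual ** 2
    return (sigma ** 2) * sq / (sigma ** 2 + sq)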