Example #1
def init(**kwarg):

    setting = {}
    # create folder
    output_folder = kwarg.pop('output_folder')
    output_folder = osp.expandvars(output_folder)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(kwarg, conf_file)

    result_folder = kwarg.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = kwarg.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    out_img_folder = osp.join(output_folder, 'images')
    if not osp.exists(out_img_folder):
        os.makedirs(out_img_folder)

    # check that CUDA is available when requested
    use_cuda = kwarg.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)

    # read gender
    input_gender = kwarg.pop('gender', 'neutral')
    model_type = kwarg.get('model_type')
    if model_type == 'smpllsp':
        assert input_gender == 'neutral', 'the smpl-lsp model supports the neutral gender only'
    gender_lbl_type = kwarg.pop('gender_lbl_type', 'none')

    if model_type == 'smpllsp':
        # the SMPL hip joints differ from the 2D annotations predicted by OpenPose/AlphaPose,
        # so the smpl-lsp model is used instead
        pose_format = 'lsp14'
    else:
        pose_format = 'coco17'

    dataset_obj = create_dataset(pose_format=pose_format, **kwarg)

    float_dtype = kwarg.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))

    # map smpl joints to 2D keypoints
    joint_mapper = JointMapper(dataset_obj.get_model2data())

    model_params = dict(model_path=kwarg.get('model_folder'),
                        joint_mapper=joint_mapper,
                        create_global_orient=True,
                        create_body_pose=not kwarg.get('use_vposer'),
                        create_betas=True,
                        create_left_hand_pose=False,
                        create_right_hand_pose=False,
                        create_expression=False,
                        create_jaw_pose=False,
                        create_leye_pose=False,
                        create_reye_pose=False,
                        create_transl=True, #set transl in multi-view task  --Buzhen Huang 07/31/2019
                        create_scale=True,
                        dtype=dtype,
                        **kwarg)

    model = smplx.create_scale(gender=input_gender, **model_params)

    # load camera parameters
    cam_params = kwarg.pop('cam_param')
    extris, intris = load_camera_para(cam_params)
    trans, rot = get_rot_trans(extris, photoscan=False)


    # Create one camera object per view
    views = len(extris)
    camera = []
    for v in range(views):
        focal_length = float(intris[v][0][0])
        rotate = torch.tensor(rot[v],dtype=dtype).unsqueeze(0)
        translation = torch.tensor(trans[v],dtype=dtype).unsqueeze(0)
        center = torch.tensor(intris[v][:2,2],dtype=dtype).unsqueeze(0)
        camera_t = create_camera(focal_length_x=focal_length,
                            focal_length_y=focal_length,
                            translation=translation,
                            rotation=rotate,
                            center=center,
                            dtype=dtype,
                            **kwarg)
        camera.append(camera_t)

    # fix rotation and translation of camera
    for cam in camera:
        if hasattr(cam, 'rotation'):
            cam.rotation.requires_grad = False
        if hasattr(cam, 'translation'):
            cam.translation.requires_grad = False

    # create prior
    body_pose_prior = create_prior(
        prior_type=kwarg.get('body_prior_type'),
        dtype=dtype,
        **kwarg)
    shape_prior = create_prior(
        prior_type=kwarg.get('shape_prior_type', 'l2'),
        dtype=dtype, **kwarg)
    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')

        for cam in camera:
            cam = cam.to(device=device)
        model = model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
    else:
        device = torch.device('cpu')
    
    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device,
                                                       dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)

    # load vposer
    vposer = None
    pose_embedding = None
    batch_size = 1
    if kwarg.get('use_vposer'):
        vposer_ckpt = osp.expandvars(kwarg.get('prior_folder'))
        vposer = load_vposer(vposer_ckpt, vp_model='snapshot')
        vposer = vposer.to(device=device)
        vposer.eval()
        pose_embedding = torch.zeros([batch_size, 32],
                                     dtype=dtype, device=device,
                                     requires_grad=True)

    # process fixed parameters
    setting['fix_scale'] = kwarg.get('fix_scale')
    if kwarg.get('fix_scale'):
        setting['fixed_scale'] = np.array(kwarg.get('scale'))
    else:
        setting['fixed_scale'] = None
    if kwarg.get('fix_shape'):
        setting['fixed_shape'] = np.array(kwarg.get('shape'))
    else:
        setting['fixed_shape'] = None

    # return setting
    setting['use_3d'] = kwarg.pop("use_3d")
    setting['extrinsics'] = extris
    setting['intrinsics'] = intris
    setting['model'] = model
    setting['dtype'] = dtype
    setting['device'] = device
    setting['vposer'] = vposer
    setting['joints_weight'] = joint_weights
    setting['body_pose_prior'] = body_pose_prior
    setting['shape_prior'] = shape_prior
    setting['angle_prior'] = angle_prior
    setting['cameras'] = camera
    setting['img_folder'] = out_img_folder
    setting['result_folder'] = result_folder
    setting['mesh_folder'] = mesh_folder
    setting['pose_embedding'] = pose_embedding
    setting['batch_size'] = batch_size
    return dataset_obj, setting
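
A minimal usage sketch, assuming init() is driven by a YAML configuration whose keys match the keyword arguments popped and read above; the file name 'conf_example.yaml' and the keys shown in the comment are assumptions, not part of the original example.

def run_init(config_path='conf_example.yaml'):
    # Load a configuration dict and forward it to init() as keyword arguments
    import yaml
    with open(config_path) as f:
        cfg = yaml.safe_load(f)  # e.g. {'output_folder': 'out', 'cam_param': 'cam.txt', 'model_type': 'smpl', ...}
    dataset_obj, setting = init(**cfg)
    return dataset_obj, setting
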
Example #2
File: main.py Project: wkailiu/prox
def main(**args):
    data_folder = args.get('recording_dir')
    recording_name = osp.basename(args.get('recording_dir'))
    scene_name = recording_name.split("_")[0]
    base_dir = os.path.abspath(
        osp.join(args.get('recording_dir'), os.pardir, os.pardir))
    keyp_dir = osp.join(base_dir, 'keypoints')
    keyp_folder = osp.join(keyp_dir, recording_name)
    cam2world_dir = osp.join(base_dir, 'cam2world')
    scene_dir = osp.join(base_dir, 'scenes')
    calib_dir = osp.join(base_dir, 'calibration')
    sdf_dir = osp.join(base_dir, 'sdf')
    body_segments_dir = osp.join(base_dir, 'body_segments')

    output_folder = args.get('output_folder')
    output_folder = osp.expandvars(output_folder)
    output_folder = osp.join(output_folder, recording_name)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)
    # remove 'output_folder' from args
    args.pop('output_folder')

    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    out_img_folder = osp.join(output_folder, 'images')
    if not osp.exists(out_img_folder):
        os.makedirs(out_img_folder)

    body_scene_rendering_dir = os.path.join(output_folder, 'renderings')
    if not osp.exists(body_scene_rendering_dir):
        os.mkdir(body_scene_rendering_dir)

    float_dtype = args['float_dtype']
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        print('Unknown float type {}, exiting!'.format(float_dtype))
        sys.exit(-1)

    use_cuda = args.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)

    img_folder = args.pop('img_folder', 'Color')
    dataset_obj = create_dataset(img_folder=img_folder,
                                 data_folder=data_folder,
                                 keyp_folder=keyp_folder,
                                 calib_dir=calib_dir,
                                 **args)

    start = time.time()

    input_gender = args.pop('gender', 'neutral')
    gender_lbl_type = args.pop('gender_lbl_type', 'none')
    max_persons = args.pop('max_persons', -1)

    float_dtype = args.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))

    joint_mapper = JointMapper(dataset_obj.get_model2data())

    model_params = dict(model_path=args.get('model_folder'),
                        joint_mapper=joint_mapper,
                        create_global_orient=True,
                        create_body_pose=not args.get('use_vposer'),
                        create_betas=True,
                        create_left_hand_pose=True,
                        create_right_hand_pose=True,
                        create_expression=True,
                        create_jaw_pose=True,
                        create_leye_pose=True,
                        create_reye_pose=True,
                        create_transl=True,
                        dtype=dtype,
                        **args)

    male_model = smplx.create(gender='male', **model_params)
    # SMPL-H has no gender-neutral model
    if args.get('model_type') != 'smplh':
        neutral_model = smplx.create(gender='neutral', **model_params)
    female_model = smplx.create(gender='female', **model_params)

    # Create the camera object
    camera_center = None \
        if args.get('camera_center_x') is None or args.get('camera_center_y') is None \
        else torch.tensor([args.get('camera_center_x'), args.get('camera_center_y')], dtype=dtype).view(-1, 2)
    camera = create_camera(focal_length_x=args.get('focal_length_x'),
                           focal_length_y=args.get('focal_length_y'),
                           center=camera_center,
                           batch_size=args.get('batch_size'),
                           dtype=dtype)

    if hasattr(camera, 'rotation'):
        camera.rotation.requires_grad = False

    use_hands = args.get('use_hands', True)
    use_face = args.get('use_face', True)

    body_pose_prior = create_prior(prior_type=args.get('body_prior_type'),
                                   dtype=dtype,
                                   **args)

    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(prior_type=args.get('jaw_prior_type'),
                                 dtype=dtype,
                                 **args)
        expr_prior = create_prior(prior_type=args.get('expr_prior_type', 'l2'),
                                  dtype=dtype,
                                  **args)

    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'),
            dtype=dtype,
            use_left_hand=True,
            **lhand_args)

        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'),
            dtype=dtype,
            use_right_hand=True,
            **rhand_args)

    shape_prior = create_prior(prior_type=args.get('shape_prior_type', 'l2'),
                               dtype=dtype,
                               **args)

    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')

        camera = camera.to(device=device)
        female_model = female_model.to(device=device)
        male_model = male_model.to(device=device)
        if args.get('model_type') != 'smplh':
            neutral_model = neutral_model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
        if use_face:
            expr_prior = expr_prior.to(device=device)
            jaw_prior = jaw_prior.to(device=device)
        if use_hands:
            left_hand_prior = left_hand_prior.to(device=device)
            right_hand_prior = right_hand_prior.to(device=device)
    else:
        device = torch.device('cpu')

    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device,
                                                       dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)

    for idx, data in enumerate(dataset_obj):

        img = data['img']
        fn = data['fn']
        keypoints = data['keypoints']
        depth_im = data['depth_im']
        mask = data['mask']
        init_trans = None if data['init_trans'] is None else torch.tensor(
            data['init_trans'], dtype=dtype).view(-1, 3)
        scan = data['scan_dict']
        print('Processing: {}'.format(data['img_path']))

        curr_result_folder = osp.join(result_folder, fn)
        if not osp.exists(curr_result_folder):
            os.makedirs(curr_result_folder)
        curr_mesh_folder = osp.join(mesh_folder, fn)
        if not osp.exists(curr_mesh_folder):
            os.makedirs(curr_mesh_folder)
        #TODO: SMPLifyD and PROX won't work for multiple persons
        for person_id in range(keypoints.shape[0]):
            if person_id >= max_persons and max_persons > 0:
                continue

            curr_result_fn = osp.join(curr_result_folder,
                                      '{:03d}.pkl'.format(person_id))
            curr_mesh_fn = osp.join(curr_mesh_folder,
                                    '{:03d}.ply'.format(person_id))
            curr_body_scene_rendering_fn = osp.join(body_scene_rendering_dir,
                                                    fn + '.png')

            curr_img_folder = osp.join(output_folder, 'images', fn,
                                       '{:03d}'.format(person_id))
            if not osp.exists(curr_img_folder):
                os.makedirs(curr_img_folder)

            if gender_lbl_type != 'none':
                if gender_lbl_type == 'pd' and 'gender_pd' in data:
                    gender = data['gender_pd'][person_id]
                if gender_lbl_type == 'gt' and 'gender_gt' in data:
                    gender = data['gender_gt'][person_id]
            else:
                gender = input_gender

            if gender == 'neutral':
                body_model = neutral_model
            elif gender == 'female':
                body_model = female_model
            elif gender == 'male':
                body_model = male_model

            out_img_fn = osp.join(curr_img_folder, 'output.png')

            fit_single_frame(
                img,
                keypoints[[person_id]],
                init_trans,
                scan,
                cam2world_dir=cam2world_dir,
                scene_dir=scene_dir,
                sdf_dir=sdf_dir,
                body_segments_dir=body_segments_dir,
                scene_name=scene_name,
                body_model=body_model,
                camera=camera,
                joint_weights=joint_weights,
                dtype=dtype,
                output_folder=output_folder,
                result_folder=curr_result_folder,
                out_img_fn=out_img_fn,
                result_fn=curr_result_fn,
                mesh_fn=curr_mesh_fn,
                body_scene_rendering_fn=curr_body_scene_rendering_fn,
                shape_prior=shape_prior,
                expr_prior=expr_prior,
                body_pose_prior=body_pose_prior,
                left_hand_prior=left_hand_prior,
                right_hand_prior=right_hand_prior,
                jaw_prior=jaw_prior,
                angle_prior=angle_prior,
                **args)

    elapsed = time.time() - start
    time_msg = time.strftime('%H hours, %M minutes, %S seconds',
                             time.gmtime(elapsed))
    print('Processing the data took: {}'.format(time_msg))
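
The float_dtype selection appears twice in this example; a small helper along the following lines could replace both blocks. This is a refactoring sketch under that assumption, not code from the original project.

def get_dtype(float_dtype='float32'):
    # Map the configuration string to a torch dtype, failing loudly on unknown values
    import torch
    mapping = {'float32': torch.float32, 'float64': torch.float64}
    if float_dtype not in mapping:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))
    return mapping[float_dtype]
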
Example #3
def main(data,gf,param,outfile):
  '''
  Nt: number of time steps
  Nx: number of positions
  Dx: spatial dimensions of coordinates and displacements 
  Ns: number of slip basis functions per slip direction
  Ds: number of slip directions
  Nv: number of fluidity basis functions
  total: number of state parameters (Ns*Ds + Nv + 2*Nx*Dx)

  Parameters
  ----------
    data: \ mask : (Nt,Nx) boolean array
          \ mean : (Nt,Nx,Dx) array
          \ covariance : (Nt,Nx,Dx,Dx) array
          \ metadata \ position : (Nx,Dx) array
                     \ time : (Nt,) array

    prior: \ mean : (total,) array
           \ covariance : (total,total) array

    gf: \ elastic : (Ns,Ds,Nx,Dx) array
        \ viscoelastic : (Ns,Ds,Dv,Nx,Dx) array
        \ metadata \ position : (Nx,Dx) array

    reg: \ regularization : (*,total) array

    params: user parameter dictionary

  Returns
  -------
    out: \ slip_integral \ mean : (Nt,Ns,Ds) array
                         \ uncertainty :(Nt,Ns,Ds) array
         \ slip \ mean : (Nt,Ns,Ds) array
                \ uncertainty : (Nt,Ns,Ds) array
         \ slip_derivative \ mean : (Nt,Ns,Ds) array
                           \ uncertainty : (Nt,Ns,Ds) array
         \ fluidity \ mean : (Nv,) array
                    \ uncertainty : (Nv,) array
         \ secular_velocity \ mean : (Nx,Dx) array
                            \ uncertainty : (Nx,Dx) array
         \ baseline_displacement \ mean : (Nx,Dx) array
                                 \ uncertainty : (Nx,Dx) array

  '''
  F = gf['slip'][...]
  G = gf['fluidity'][...]
  coseismic_times = np.array(param['coseismic_times'])
  afterslip_start_times = param['afterslip_start_times']
  afterslip_end_times = param['afterslip_end_times']
  afterslip_times = list(zip(afterslip_start_times, afterslip_end_times))
  afterslip_times = np.array(afterslip_times)
  time = data['time'][:]

  slip_scale = 1.0 # meter
  relax_scale = 1.0 # year
  time_scale = np.std(time)
  time_shift = np.mean(time) # years
  disp_scale = 1.0 # meter
  # shift is different for each time series
  disp_shift = np.mean(data['mean'],0)

  # scale greens functions
  # F is originally in meters disp per meter slip
  F /= disp_scale/slip_scale
  # G is originally in meters/year disp per meter slip*fluidity 
  G /= (disp_scale/time_scale)/(slip_scale/relax_scale) 
  
  time -= time_shift
  time /= time_scale

  coseismic_times -= time_shift
  coseismic_times /= time_scale
  afterslip_times -= time_shift
  afterslip_times /= time_scale

  param['initial_slip_variance'] /= slip_scale**2
  param['fluidity_variance'] *= relax_scale**2
  param['secular_velocity_variance'] /= (disp_scale/time_scale)**2
  param['baseline_displacement_variance'] /= disp_scale**2

  # define slip functions and slip jacobian here
  slip_func,slip_jac = steps_and_ramps(coseismic_times,
                                       afterslip_times)

  Nst = len(coseismic_times) + len(afterslip_start_times)
  Ns,Ds,Nv,Nx,Dx = np.shape(G)
  Nt = len(time)
  
  # check for consistency between input
  assert data['mean'].shape == (Nt,Nx,Dx)
  assert data['variance'].shape == (Nt,Nx,Dx)
  assert F.shape == (Ns,Ds,Nx,Dx)
  assert G.shape == (Ns,Ds,Nv,Nx,Dx)
  p = state_parser(Nst,Ns,Ds,Nv,Nx,Dx)

  if param['solver'] == 'bvls':
    solver = modest.bvls
    upper_bound = 1e6*np.ones(p['total'])
    lower_bound = -1e6*np.ones(p['total'])

    # all inferred fluidities will be positive
    lower_bound[p['fluidity']] = 0

    # left-lateral slip is constrained to be non-positive (upper bound of zero)
    left_lateral_indices = np.array(p['slip'][:,:,0],copy=True)
    thrust_indices = np.array(p['slip'][:,:,1],copy=True)
    upper_bound[left_lateral_indices] = 0.0
    solver_args = (lower_bound,upper_bound)
    solver_kwargs = {}

  elif param['solver'] == 'lstsq':
    solver = modest.lstsq
    solver_args = ()
    solver_kwargs = {}

  elif param['solver'] == 'lsmr':
    solver = modest.lsmr
    solver_args = ()
    solver_kwargs = {}

  elif param['solver'] == 'lgmres':
    solver = modest.lgmres
    solver_args = ()
    solver_kwargs = {'tol':1e-8,'maxiter':1000}

  elif param['solver'] == 'dgs':
    solver = modest.dgs
    solver_args = ()
    solver_kwargs = {}

  #fprior = prior.create_formatted_prior(param,p,slip_model='parameterized')
  Xprior,Cprior = prior.create_prior(param,p,slip_model='parameterized')   
  reg_matrix = reg.create_regularization(param,p,slip_model='parameterized')
  reg_rows = len(reg_matrix)  

  setup_output_file(outfile,p,
                    data['name'][...],
                    data['position'][...],
                    data['time'][...])
  
  Xprior,Cprior = modest.nonlin_lstsq(
                      regularization,
                      np.zeros(reg_rows),
                      Xprior, 
                      data_covariance=np.eye(reg_rows),
                      prior_covariance=Cprior,
                      system_args=(reg_matrix,),
                      jacobian=regularization_jacobian,
                      jacobian_args=(reg_matrix,),
                      maxitr=param['maxitr'],
                      solver=solver, 
                      solver_args=solver_args,
                      solver_kwargs=solver_kwargs,
                      LM_damping=True,
                      output=['solution','solution_covariance'])

  time_indices = range(Nt)
  block_time_indices = modest.misc.divide_list(time_indices,param['time_blocks'])
  for i in block_time_indices:
    outfile['data/mean'][i,...] = data['mean'][i,...]
    outfile['data/mask'][i,...] = data['mask'][i,...]
    outfile['data/covariance'][i,...] = data['variance'][i,...]
    di = data['mean'][i,...]
    di -= disp_shift
    di /= disp_scale

    di_mask = np.array(data['mask'][i,:],dtype=bool)
    # expand to three dimensions
    di_mask = np.repeat(di_mask[...,None],3,-1)
    di = di[~di_mask]
    Cdi = data['variance'][i,...]
    Cdi /= disp_scale**2
    Cdi = Cdi[~di_mask]

    Xprior,Cprior = modest.nonlin_lstsq(
                      observation,
                      di,
                      Xprior, 
                      data_covariance=Cdi,
                      prior_covariance=Cprior,
                      system_args=(time[i],F,G,p,slip_func,di_mask),
                      jacobian=observation_jacobian,
                      jacobian_args=(time[i],F,G,p,slip_func,slip_jac,di_mask),
                      solver=solver, 
                      solver_args=solver_args,
                      solver_kwargs=solver_kwargs,
                      maxitr=param['maxitr'],
                      LM_damping=True,
                      LM_param=1.0,
                      rtol=1e-2,
                      atol=1e-2,
                      output=['solution','solution_covariance'])

  post_mean_scaled,post_cov_scaled = Xprior,Cprior

  post_mean = np.copy(post_mean_scaled)
  post_mean[p['baseline_displacement']] *= disp_scale
  post_mean[p['baseline_displacement']] += disp_shift
  post_mean[p['secular_velocity']] *= (disp_scale/time_scale)  
  post_mean[p['slip']] *= slip_scale
  post_mean[p['fluidity']] /= relax_scale

  for i in range(Nt):
    outfile['state/all'][i,:] = post_mean
    outfile['state/baseline_displacement'][i,...] = post_mean[p['baseline_displacement']]   
    outfile['state/secular_velocity'][i,...] = post_mean[p['secular_velocity']]   
    outfile['state/slip'][i,...] = slip_func(post_mean[p['slip']],time[i])   
    outfile['state/slip_derivative'][i,...] = slip_func(post_mean[p['slip']],time[i],diff=1)
    outfile['state/fluidity'][i,...] = post_mean[p['fluidity']]   

  # compute predicted data
  logger.info('computing predicted data')
  error = 0.0
  count = 0
  for i in range(Nt):
    predicted = observation_t(post_mean_scaled,
                              time[i],
                              F,G,p,slip_func)
    # change back to meters
    predicted *= disp_scale    
    predicted += disp_shift
    residual = outfile['data/mean'][i,...] - predicted
    covariance = outfile['data/covariance'][i,...]
    data_mask = np.array(outfile['data/mask'][i,...],dtype=bool)
    error += L2(residual,covariance,data_mask) 
    count += np.sum(~data_mask)

    outfile['predicted/mean'][i,...] = predicted

    mask = np.zeros(p['total'])
    mask[p['secular_velocity']] = 1.0
    mask[p['baseline_displacement']] = 1.0
    mask_post_mean = post_mean_scaled*mask
    tectonic = observation_t(mask_post_mean,
                             time[i],
                             F,G,p,
                             slip_func)
    tectonic *= disp_scale
    tectonic += disp_shift
    outfile['tectonic/mean'][i,...] = tectonic

    mask = np.zeros(p['total'])
    mask[p['slip']] = 1.0
    mask_post_mean = post_mean_scaled*mask
    elastic = observation_t(mask_post_mean,
                            time[i],
                            F,G,p,
                            slip_func)

    elastic *= disp_scale
    outfile['elastic/mean'][i,...] = elastic 

    visc = (outfile['predicted/mean'][i,...] - 
            outfile['tectonic/mean'][i,...] - 
            outfile['elastic/mean'][i,...])
    outfile['viscous/mean'][i,...] = visc

  logger.info('total RMSE: %s' % np.sqrt(error/count))

  return 
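
A design sketch, assuming the modest module imported by the example above: the non-bvls solver selection could be table-driven instead of an if/elif chain. The dictionary layout and helper name are hypothetical; the lgmres keyword arguments are taken from the example.

SOLVER_TABLE = {
    'lstsq':  (modest.lstsq,  (), {}),
    'lsmr':   (modest.lsmr,   (), {}),
    'lgmres': (modest.lgmres, (), {'tol': 1e-8, 'maxiter': 1000}),
    'dgs':    (modest.dgs,    (), {}),
}

def pick_solver(param):
    # 'bvls' is handled separately because its bounds depend on the state parser
    solver, solver_args, solver_kwargs = SOLVER_TABLE[param['solver']]
    return solver, solver_args, solver_kwargs
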
Example #4
def main(**args):
    output_folder = args.pop('output_folder')
    output_folder = osp.expandvars(output_folder)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)

    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    out_img_folder = osp.join(output_folder, 'images')
    if not osp.exists(out_img_folder):
        os.makedirs(out_img_folder)

    float_dtype = args['float_dtype']
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        print('Unknown float type {}, exiting!'.format(float_dtype))
        sys.exit(-1)

    use_cuda = args.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)

    img_folder = args.pop('img_folder', 'images')
    dataset_obj = create_dataset(img_folder=img_folder, **args)

    start = time.time()

    input_gender = args.pop('gender', 'neutral')
    gender_lbl_type = args.pop('gender_lbl_type', 'none')
    max_persons = args.pop('max_persons', -1)

    float_dtype = args.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))

    joint_mapper = JointMapper(dataset_obj.get_model2data())

    model_params = dict(model_path=args.get('model_folder'),
                        joint_mapper=joint_mapper,
                        create_global_orient=True,
                        create_body_pose=not args.get('use_vposer'),
                        create_betas=True,
                        create_left_hand_pose=True,
                        create_right_hand_pose=True,
                        create_expression=True,
                        create_jaw_pose=True,
                        create_leye_pose=True,
                        create_reye_pose=True,
                        create_transl=False,
                        dtype=dtype,
                        **args)

    male_model = smplx.create(gender='male', **model_params)
    # SMPL-H has no gender-neutral model
    if args.get('model_type') != 'smplh':
        neutral_model = smplx.create(gender='neutral', **model_params)
    female_model = smplx.create(gender='female', **model_params)

    # Create the camera object
    focal_length = args.get('focal_length')
    camera = create_camera(focal_length_x=focal_length,
                           focal_length_y=focal_length,
                           dtype=dtype,
                           **args)

    if hasattr(camera, 'rotation'):
        camera.rotation.requires_grad = False

    use_hands = args.get('use_hands', True)
    use_face = args.get('use_face', True)

    body_pose_prior = create_prior(prior_type=args.get('body_prior_type'),
                                   dtype=dtype,
                                   **args)

    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(prior_type=args.get('jaw_prior_type'),
                                 dtype=dtype,
                                 **args)
        expr_prior = create_prior(prior_type=args.get('expr_prior_type', 'l2'),
                                  dtype=dtype,
                                  **args)

    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'),
            dtype=dtype,
            use_left_hand=True,
            **lhand_args)

        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'),
            dtype=dtype,
            use_right_hand=True,
            **rhand_args)

    shape_prior = create_prior(prior_type=args.get('shape_prior_type', 'l2'),
                               dtype=dtype,
                               **args)

    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')

        camera = camera.to(device=device)
        female_model = female_model.to(device=device)
        male_model = male_model.to(device=device)
        if args.get('model_type') != 'smplh':
            neutral_model = neutral_model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
        if use_face:
            expr_prior = expr_prior.to(device=device)
            jaw_prior = jaw_prior.to(device=device)
        if use_hands:
            left_hand_prior = left_hand_prior.to(device=device)
            right_hand_prior = right_hand_prior.to(device=device)
    else:
        device = torch.device('cpu')

    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device,
                                                       dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)

    for idx, data in enumerate(dataset_obj):

        img = data['img']
        fn = data['fn']
        keypoints = data['keypoints']
        print('Processing: {}'.format(data['img_path']))

        curr_result_folder = osp.join(result_folder, fn)
        if not osp.exists(curr_result_folder):
            os.makedirs(curr_result_folder)
        curr_mesh_folder = osp.join(mesh_folder, fn)
        if not osp.exists(curr_mesh_folder):
            os.makedirs(curr_mesh_folder)
        for person_id in range(keypoints.shape[0]):
            if person_id >= max_persons and max_persons > 0:
                continue

            curr_result_fn = osp.join(curr_result_folder,
                                      '{:03d}.pkl'.format(person_id))
            curr_mesh_fn = osp.join(curr_mesh_folder,
                                    '{:03d}.obj'.format(person_id))

            curr_img_folder = osp.join(output_folder, 'images', fn,
                                       '{:03d}'.format(person_id))
            if not osp.exists(curr_img_folder):
                os.makedirs(curr_img_folder)

            if gender_lbl_type != 'none':
                if gender_lbl_type == 'pd' and 'gender_pd' in data:
                    gender = data['gender_pd'][person_id]
                if gender_lbl_type == 'gt' and 'gender_gt' in data:
                    gender = data['gender_gt'][person_id]
            else:
                gender = input_gender

            if gender == 'neutral':
                body_model = neutral_model
            elif gender == 'female':
                body_model = female_model
            elif gender == 'male':
                body_model = male_model

            try:
                out_img_fn = osp.join(curr_img_folder,
                                      '{:06d}.png'.format(int(fn)))
            except ValueError:
                out_img_fn = osp.join(curr_img_folder, 'output.png')

            fit_single_frame(img,
                             keypoints[[person_id]],
                             body_model=body_model,
                             camera=camera,
                             joint_weights=joint_weights,
                             dtype=dtype,
                             output_folder=output_folder,
                             result_folder=curr_result_folder,
                             out_img_fn=out_img_fn,
                             result_fn=curr_result_fn,
                             mesh_fn=curr_mesh_fn,
                             shape_prior=shape_prior,
                             expr_prior=expr_prior,
                             body_pose_prior=body_pose_prior,
                             left_hand_prior=left_hand_prior,
                             right_hand_prior=right_hand_prior,
                             jaw_prior=jaw_prior,
                             angle_prior=angle_prior,
                             **args)

    elapsed = time.time() - start
    time_msg = time.strftime('%H hours, %M minutes, %S seconds',
                             time.gmtime(elapsed))
    print('Processing the data took: {}'.format(time_msg))
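
A sketch of the per-frame output layout built inside the loop above, collected into one hypothetical helper; the folder and file name patterns are taken from the example, while the helper itself is an assumption.

import os.path as osp

def frame_output_paths(result_folder, mesh_folder, output_folder, fn, person_id):
    # Paths for one person in one frame: result pickle, mesh, and image folder
    return {
        'result': osp.join(result_folder, fn, '{:03d}.pkl'.format(person_id)),
        'mesh': osp.join(mesh_folder, fn, '{:03d}.obj'.format(person_id)),
        'img_folder': osp.join(output_folder, 'images', fn, '{:03d}'.format(person_id)),
    }
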
Example #5
def main(data,gf,param,outfile):
  '''
  Nt: number of time steps
  Nx: number of positions
  Dx: spatial dimensions of coordinates and displacements 
  Ns: number of slip basis functions per slip direction
  Ds: number of slip directions
  Nv: number of fluidity basis functions
  total: number of state parameters (Ns*Ds + Nv + 2*Nx*Dx)

  Parameters
  ----------
    data: \ mask : (Nt,Nx) boolean array
          \ mean : (Nt,Nx,Dx) array
          \ covariance : (Nt,Nx,Dx,Dx) array
          \ metadata \ position : (Nx,Dx) array
                     \ time : (Nt,) array

    prior: \ mean : (total,) array
           \ covariance : (total,total) array

    gf: \ elastic : (Ns,Ds,Nx,Dx) array
        \ viscoelastic : (Ns,Ds,Dv,Nx,Dx) array
        \ metadata \ position : (Nx,Dx) array

    reg: \ regularization : (*,total) array

    params: user parameter dictionary

  Returns
  -------
    out: \ slip_integral \ mean : (Nt,Ns,Ds) array
                         \ uncertainty :(Nt,Ns,Ds) array
         \ slip \ mean : (Nt,Ns,Ds) array
                \ uncertainty : (Nt,Ns,Ds) array
         \ slip_derivative \ mean : (Nt,Ns,Ds) array
                           \ uncertainty : (Nt,Ns,Ds) array
         \ fluidity \ mean : (Nv,) array
                    \ uncertainty : (Nv,) array
         \ secular_velocity \ mean : (Nx,Dx) array
                            \ uncertainty : (Nx,Dx) array
         \ baseline_displacement \ mean : (Nx,Dx) array
                                 \ uncertainty : (Nx,Dx) array

  '''
  # set attributes of outfile
  outfile.attrs.update(param)
  outfile.attrs.update(gf.attrs)
  outfile.attrs.update(data.attrs)
  
  F = gf['slip'][...]
  G = gf['fluidity'][...]

  alpha = param['slip_acceleration_variance']
  jump_factor = param['jump_variance_factor']
  jump_times = param['jump_times']
  time = data['time'][:]

  Ns,Ds,Nv,Nx,Dx = np.shape(G)
  Nt = len(time)

  # check for consistency between input
  assert np.shape(data['mean']) == (Nt,Nx,Dx)
  assert np.shape(data['covariance']) == (Nt,Nx,Dx,Dx)
  assert np.shape(F) == (Ns,Ds,Nx,Dx)
  assert np.shape(G) == (Ns,Ds,Nv,Nx,Dx)
  p = state_parser(Ns,Ds,Nv,Nx,Dx)

  fprior = prior.create_formatted_prior(param,p)
  Xprior,Cprior = prior.create_prior(fprior,p)   

  freg = reg.create_formatted_regularization(param,p)
  reg_matrix = reg.create_regularization(freg,p)
  reg_rows = len(reg_matrix)  

  kalman = modest.KalmanFilter(
             Xprior,Cprior,
             observation_augmented,
             obs_args=(F,G,p,reg_matrix),
             ojac=observation_jacobian_augmented,
             ojac_args=(F,G,p,reg_matrix),
             trans=transition,
             trans_args=(p,),
             tjac=transition_jacobian,   
             tjac_args=(p,),
             pcov=process_covariance,
             pcov_args=(alpha,p),
             core=False,
             solver_kwargs={'maxitr':param['maxitr']},
             temp_file=param['history_file'])

  
  # prepare the output file

  # copy over data file 
  outfile.create_dataset('data/mean',shape=np.shape(data['mean']))
  outfile.create_dataset('data/mask',shape=np.shape(data['mask']),dtype=bool)
  outfile.create_dataset('data/covariance',shape=np.shape(data['covariance']))
  outfile['data/name'] = data['name'][...]
  outfile['data/position'] = data['position'][...]
  outfile['data/time'] = data['time'][...]

  outfile.create_dataset('predicted/mean',shape=np.shape(data['mean']))
  outfile.create_dataset('predicted/covariance',shape=np.shape(data['covariance']))
  outfile['predicted/name'] = data['name'][...]
  outfile['predicted/mask'] = np.array(0.0*data['mask'][...],dtype=bool)
  outfile['predicted/position'] = data['position'][...]
  outfile['predicted/time'] = data['time'][...]

  outfile.create_dataset('tectonic/mean',shape=np.shape(data['mean']))
  outfile.create_dataset('tectonic/covariance',shape=np.shape(data['covariance']))
  outfile['tectonic/name'] = data['name'][...]
  outfile['tectonic/mask'] = np.array(0.0*data['mask'][...],dtype=bool)
  outfile['tectonic/position'] = data['position'][...]
  outfile['tectonic/time'] = data['time'][...]

  outfile.create_dataset('elastic/mean',shape=np.shape(data['mean']))
  outfile.create_dataset('elastic/covariance',shape=np.shape(data['covariance']))
  outfile['elastic/name'] = data['name'][...]
  outfile['elastic/mask'] = np.array(0.0*data['mask'][...],dtype=bool)
  outfile['elastic/position'] = data['position'][...]
  outfile['elastic/time'] = data['time'][...]

  outfile.create_dataset('viscous/mean',shape=np.shape(data['mean']))
  outfile.create_dataset('viscous/covariance',shape=np.shape(data['covariance']))
  outfile['viscous/name'] = data['name'][...]
  outfile['viscous/mask'] = np.array(0.0*data['mask'][...],dtype=bool)
  outfile['viscous/position'] = data['position'][...]
  outfile['viscous/time'] = data['time'][...]

  a = np.ones((Nt,Nx))
  b = np.eye(3)
  c = 1e-10*np.einsum('ij,kl',a,b)
  outfile['predicted/covariance'][...] = c
  outfile['tectonic/covariance'][...] = c
  outfile['elastic/covariance'][...] = c
  outfile['viscous/covariance'][...] = c

  outfile.create_dataset('state/all',shape=(Nt,p['total']))
  for k in ['baseline_displacement',
            'secular_velocity',
            'slip_integral',
            'slip',
            'slip_derivative',
            'fluidity']:
    outfile.create_dataset('state/' + k,shape=(Nt,)+np.shape(p[k]))

  outfile['state/time'] = data['time'][...]

  logger.info('starting Kalman filter iterations')
  for i in range(Nt):
    outfile['data/mean'][i,...] = data['mean'][i,...]
    outfile['data/mask'][i,...] = data['mask'][i,...]
    outfile['data/covariance'][i,...] = data['covariance'][i,...]
    di = flat_data(data['mean'][i,...])
    di = np.hstack((di,np.zeros(reg_rows)))
    di_mask = np.array(data['mask'][i,:],dtype=bool)
    # expand to three dimensions
    di_mask = np.repeat(di_mask[:,None],3,1)
    di_mask = flat_data(di_mask)
    di_mask = np.hstack((di_mask,np.zeros(reg_rows,dtype=bool)))
    Cdi = flat_cov(data['covariance'][i,...])
    Cdi = scipy.linalg.block_diag(Cdi,np.eye(reg_rows))
    if np.all(time[i] < jump_times):
      kalman.pcov_args = (0.0*alpha,p)
      kalman.next(di,Cdi,time[i],mask=di_mask)
      kalman.pcov_args = (alpha,p)

    elif (i != 0) & np.any((time[i] > jump_times) & (time[i-1] <= jump_times)):
      logger.info('increasing slip variance by %sx when updating '
                  'from t=%s to t=%s' % (jump_factor,time[i-1],time[i]))
      kalman.pcov_args = (jump_factor*alpha,p)
      kalman.next(di,Cdi,time[i],mask=di_mask)
      kalman.pcov_args = (alpha,p)

    else:   
      kalman.next(di,Cdi,time[i],mask=di_mask)

    print(kalman.get_posterior()[0][p['fluidity']])

  logger.info('starting Rauch-Tung-Striebel smoothing')
  kalman.smooth()
  for i in range(Nt):
    outfile['state/all'][i,:] = kalman.history['smooth'][i,:]
    for k in ['baseline_displacement',
              'secular_velocity',
              'slip_integral',
              'slip',
              'slip_derivative',
              'fluidity']:
      outfile['state/' + k][i,...] = np.array(kalman.history['smooth'])[i,p[k]]

  # compute predicted data
  logger.info('computing predicted data')
  error = 0.0
  for i in range(Nt):
    predicted = observation(outfile['state/all'][i],
                            time[i],
                            F,G,p,flatten=False)
    residual = outfile['data/mean'][i,...] - predicted
    covariance = outfile['data/covariance'][i,...]
    data_mask = np.array(outfile['data/mask'][i,...],dtype=bool)
    error += RMSE(residual,covariance,data_mask) 

    outfile['predicted/mean'][i,...] = predicted

    mask = np.zeros(p['total'])
    mask[p['secular_velocity']] = 1.0
    mask[p['baseline_displacement']] = 1.0
    state = outfile['state/all'][i]*mask
    outfile['tectonic/mean'][i,...] = observation(
                                        state,
                                        time[i],
                                        F,G,p,
                                        flatten=False)

    mask = np.zeros(p['total'])
    mask[p['slip']] = 1.0
    state = outfile['state/all'][i]*mask
    outfile['elastic/mean'][i,...] = observation(
                                       state,
                                       time[i],
                                       F,G,p,
                                       flatten=False)
    
    visc = (outfile['predicted/mean'][i,...] - 
            outfile['tectonic/mean'][i,...] - 
            outfile['elastic/mean'][i,...])
    outfile['viscous/mean'][i,...] = visc

  logger.info('total RMSE: %s' % error)

  return 
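
A sketch, not from the original code: the process-noise scaling applied around the coseismic jump times in the filtering loop above, factored into a standalone helper.

import numpy as np

def process_noise_factor(t_prev, t_curr, jump_times, jump_factor):
    # Before any jump: freeze slip acceleration (factor 0). When an update steps
    # across a jump time: inflate the variance by jump_factor. Otherwise: nominal.
    jump_times = np.asarray(jump_times)
    if np.all(t_curr < jump_times):
        return 0.0
    if t_prev is not None and np.any((t_curr > jump_times) & (t_prev <= jump_times)):
        return jump_factor
    return 1.0
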
Example #6
def main(**args):
    output_folder = args.pop('output_folder')
    output_folder = osp.expandvars(output_folder)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)

    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    float_dtype = args['float_dtype']
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        print('Unknown float type {}, exiting!'.format(float_dtype))
        sys.exit(-1)

    use_cuda = args.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)

    dataset_obj = create_dataset(**args)

    start = time.time()

    input_gender = args.pop('gender', 'neutral')
    gender_lbl_type = args.pop('gender_lbl_type', 'none')
    max_persons = args.pop('max_persons', -1)

    float_dtype = args.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))


    body_model = SMPL(args.get('model_folder'), dtype=dtype, joint_regressor_extra=args.get('j_regressor_extra'))

    use_hands = args.get('use_hands', True)
    use_face = args.get('use_face', True)

    body_pose_prior = create_prior(
        prior_type=args.get('body_prior_type'),
        dtype=dtype,
        **args)

    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(
            prior_type=args.get('jaw_prior_type'),
            dtype=dtype,
            **args)
        expr_prior = create_prior(
            prior_type=args.get('expr_prior_type', 'l2'),
            dtype=dtype, **args)

    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'),
            dtype=dtype,
            use_left_hand=True,
            **lhand_args)

        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'),
            dtype=dtype,
            use_right_hand=True,
            **rhand_args)

    shape_prior = create_prior(
        prior_type=args.get('shape_prior_type', 'l2'),
        dtype=dtype, **args)

    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')

        body_model = body_model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
        if use_face:
            expr_prior = expr_prior.to(device=device)
            jaw_prior = jaw_prior.to(device=device)
        if use_hands:
            left_hand_prior = left_hand_prior.to(device=device)
            right_hand_prior = right_hand_prior.to(device=device)
    else:
        device = torch.device('cpu')

    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device,
                                                       dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)


    for idx, keypoints_3d in enumerate(dataset_obj):
        curr_result_folder = os.path.join(result_folder, str(idx).zfill(7))
        if not osp.exists(curr_result_folder):
            os.makedirs(curr_result_folder)
        curr_mesh_folder = os.path.join(mesh_folder, str(idx).zfill(7))
        if not osp.exists(curr_mesh_folder):
            os.makedirs(curr_mesh_folder)

        for person_idx in range(len(keypoints_3d)):
            print(f'Processing: Image {idx} Person {person_idx}')
            curr_result_fn = osp.join(curr_result_folder, f'{person_idx:02d}.pkl')
            curr_mesh_fn = osp.join(curr_mesh_folder, f'{person_idx:02d}.obj')

            try:
                fit_single_frame(keypoints_3d[[person_idx]],
                                 body_model=body_model,
                                 joint_weights=joint_weights,
                                 dtype=dtype,
                                 result_fn=curr_result_fn,
                                 mesh_fn=curr_mesh_fn,
                                 shape_prior=shape_prior,
                                 expr_prior=expr_prior,
                                 body_pose_prior=body_pose_prior,
                                 left_hand_prior=left_hand_prior,
                                 right_hand_prior=right_hand_prior,
                                 jaw_prior=jaw_prior,
                                 angle_prior=angle_prior,
                                 **args)

                elapsed = time.time() - start
                time_msg = time.strftime('%H hours, %M minutes, %S seconds',
                                         time.gmtime(elapsed))
                print('Processing the data took: {}'.format(time_msg))

            except Exception as exc:
                print('Some error occurred during fitting: {}'.format(exc))
Example #7
def configure(**args):
    output_folder = args.pop('output_folder')
    output_folder = osp.expandvars(output_folder)
    if not osp.exists(output_folder):
        os.makedirs(output_folder)

    # Store the arguments for the current experiment
    conf_fn = osp.join(output_folder, 'conf.yaml')
    with open(conf_fn, 'w') as conf_file:
        yaml.dump(args, conf_file)

    result_folder = args.pop('result_folder', 'results')
    result_folder = osp.join(output_folder, result_folder)
    if not osp.exists(result_folder):
        os.makedirs(result_folder)

    mesh_folder = args.pop('mesh_folder', 'meshes')
    mesh_folder = osp.join(output_folder, mesh_folder)
    if not osp.exists(mesh_folder):
        os.makedirs(mesh_folder)

    out_img_folder = osp.join(output_folder, 'images')
    if not osp.exists(out_img_folder):
        os.makedirs(out_img_folder)

    float_dtype = args['float_dtype']
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        print('Unknown float type {}, exiting!'.format(float_dtype))
        sys.exit(-1)

    use_cuda = args.get('use_cuda', True)
    if use_cuda and not torch.cuda.is_available():
        print('CUDA is not available, exiting!')
        sys.exit(-1)
    elif not use_cuda:
        print('CUDA disabled, running on CPU')

    # Create a dataset pointing at a (possibly non-existent) 'images' folder;
    # it is only used here to derive the joint mapping and related configuration
    img_folder = args.pop('img_folder', 'images')
    dataset_obj = create_dataset(img_folder='images', **args)

    input_gender = args.pop('gender', 'neutral')
    gender_lbl_type = args.pop('gender_lbl_type', 'none')
    max_persons = args.pop('max_persons', -1)

    float_dtype = args.get('float_dtype', 'float32')
    if float_dtype == 'float64':
        dtype = torch.float64
    elif float_dtype == 'float32':
        dtype = torch.float32
    else:
        raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))

    joint_mapper = JointMapper(dataset_obj.get_model2data())

    model_params = dict(model_path=args.get('model_folder'),
                        joint_mapper=joint_mapper,
                        create_global_orient=True,
                        create_body_pose=not args.get('use_vposer'),
                        create_betas=True,
                        create_left_hand_pose=True,
                        create_right_hand_pose=True,
                        create_expression=True,
                        create_jaw_pose=True,
                        create_leye_pose=True,
                        create_reye_pose=True,
                        create_transl=False,
                        dtype=dtype,
                        **args)

    male_model = smplx.create(gender='male', **model_params)
    # SMPL-H has no gender-neutral model
    if args.get('model_type') != 'smplh':
        neutral_model = smplx.create(gender='neutral', **model_params)
    female_model = smplx.create(gender='female', **model_params)

    # Create the camera object
    focal_length = args.get('focal_length')
    camera = create_camera(focal_length_x=focal_length,
                           focal_length_y=focal_length,
                           dtype=dtype,
                           **args)

    if hasattr(camera, 'rotation'):
        camera.rotation.requires_grad = False

    use_hands = args.get('use_hands', True)
    use_face = args.get('use_face', True)

    body_pose_prior = create_prior(prior_type=args.get('body_prior_type'),
                                   dtype=dtype,
                                   **args)

    jaw_prior, expr_prior = None, None
    if use_face:
        jaw_prior = create_prior(prior_type=args.get('jaw_prior_type'),
                                 dtype=dtype,
                                 **args)
        expr_prior = create_prior(prior_type=args.get('expr_prior_type', 'l2'),
                                  dtype=dtype,
                                  **args)

    left_hand_prior, right_hand_prior = None, None
    if use_hands:
        lhand_args = args.copy()
        lhand_args['num_gaussians'] = args.get('num_pca_comps')
        left_hand_prior = create_prior(
            prior_type=args.get('left_hand_prior_type'),
            dtype=dtype,
            use_left_hand=True,
            **lhand_args)

        rhand_args = args.copy()
        rhand_args['num_gaussians'] = args.get('num_pca_comps')
        right_hand_prior = create_prior(
            prior_type=args.get('right_hand_prior_type'),
            dtype=dtype,
            use_right_hand=True,
            **rhand_args)

    shape_prior = create_prior(prior_type=args.get('shape_prior_type', 'l2'),
                               dtype=dtype,
                               **args)

    angle_prior = create_prior(prior_type='angle', dtype=dtype)

    if use_cuda and torch.cuda.is_available():
        device = torch.device('cuda')

        camera = camera.to(device=device)
        female_model = female_model.to(device=device)
        male_model = male_model.to(device=device)
        if args.get('model_type') != 'smplh':
            neutral_model = neutral_model.to(device=device)
        body_pose_prior = body_pose_prior.to(device=device)
        angle_prior = angle_prior.to(device=device)
        shape_prior = shape_prior.to(device=device)
        if use_face:
            expr_prior = expr_prior.to(device=device)
            jaw_prior = jaw_prior.to(device=device)
        if use_hands:
            left_hand_prior = left_hand_prior.to(device=device)
            right_hand_prior = right_hand_prior.to(device=device)
    else:
        device = torch.device('cpu')

    # A weight for every joint of the model
    joint_weights = dataset_obj.get_joint_weights().to(device=device,
                                                       dtype=dtype)
    # Add a fake batch dimension for broadcasting
    joint_weights.unsqueeze_(dim=0)

    return {
        'neutral_model': neutral_model,
        'camera': camera,
        'joint_weights': joint_weights,
        'body_pose_prior': body_pose_prior,
        'jaw_prior': jaw_prior,
        'left_hand_prior': left_hand_prior,
        'right_hand_prior': right_hand_prior,
        'shape_prior': shape_prior,
        'expr_prior': expr_prior,
        'angle_prior': angle_prior
    }
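
A hypothetical usage sketch: configure() returns the fitting assets keyed by name; how they are consumed afterwards (for instance by fit_single_frame, as in the other examples) is an assumption, as is the source of args.

assets = configure(**args)  # args: configuration dict from the project's argument parser (assumed)
camera = assets['camera']
neutral_model = assets['neutral_model']
body_pose_prior = assets['body_pose_prior']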