Example #1
0
def prepare_dataset(opt):
    """
    Prepare PyTorch dataset objects used for training a 2D-to-3D deep network.

    Args
        opt: experiment options
    Returns
        train_dataset: training dataset as PyTorch dataset object
        eval_dataset: evaluation dataset as PyTorch dataset object
        data_stats: dataset statistics computed from the training dataset
        action_eval_list: a list of evaluation dataset objects where each
        corresponds to one action
    """
    # get relevant paths
    data_dir = opt.data_dir
    cameras_path = os.path.join(data_dir, 'cameras.npy')
    # By default, all actions are used
    actions = define_actions(opt.actions)
    # load camera parameters to project the 3D skeleton.
    # The cameras file stores a pickled Python dict inside a 0-d object
    # array; NumPy >= 1.16.3 refuses to unpickle by default, so
    # allow_pickle=True is required for .item() to work.
    rcams = np.load(cameras_path, allow_pickle=True).item()
    # produce more camera views by adding virtual cameras if needed
    if opt.virtual_cams:
        rcams = add_virtual_cams(rcams)
    # first prepare a Python dictionary containing the 2D and 3D data
    data_dic, data_stats = prepare_data_dict(rcams, opt, predict_14=False)
    # network input/output vector lengths follow the used joint dimensions
    input_size = len(data_stats['dim_use_2d'])
    output_size = len(data_stats['dim_use_3d'])
    # convert the Python dictionaries to numpy arrays
    # NOTE(review): camera_frame is a free name here — presumably a
    # module-level flag selecting the camera coordinate system; confirm.
    train_input, train_output = get_all_data(data_dic['train_set_2d'],
                                             data_dic['train_set_3d'],
                                             camera_frame,
                                             norm_twoD=opt.norm_twoD,
                                             input_size=input_size,
                                             output_size=output_size)

    eval_input, eval_output = get_all_data(data_dic['test_set_2d'],
                                           data_dic['test_set_3d'],
                                           camera_frame,
                                           norm_twoD=opt.norm_twoD,
                                           input_size=input_size,
                                           output_size=output_size)
    # The numpy arrays are finally used to initialize the dataset objects
    train_dataset = dataset.PoseDataset(train_input,
                                        train_output,
                                        'train',
                                        refine_3d=opt.refine_3d)
    eval_dataset = dataset.PoseDataset(eval_input,
                                       eval_output,
                                       'eval',
                                       refine_3d=opt.refine_3d)
    # Create a list of dataset objects for action-wise evaluation
    action_eval_list = split_action(data_dic['test_set_2d'],
                                    data_dic['test_set_3d'],
                                    actions,
                                    camera_frame,
                                    opt,
                                    input_size=input_size,
                                    output_size=output_size)

    return train_dataset, eval_dataset, data_stats, action_eval_list
Example #2
0
def split_action(dic_2d, dic_3d, actions, camera_frame, opt, input_size, output_size):
    """
    Generate a list of evaluation datasets, one per action.

    Args
        dic_2d: dictionary containing 2d poses
        dic_3d: dictionary containing 3d poses
        actions: list of defined actions
        camera_frame: use camera coordinate system
        opt: experiment options
        input_size: input vector length
        output_size: output vector length
    Returns
        action_dataset_list: a list of datasets where each element corresponds
        to one action
    """
    action_dataset_list = []
    # iterate over actions directly instead of indexing via range(len(...))
    for action in actions:
        # keep only the poses belonging to the current action
        dic_2d_action, dic_3d_action = select_action(dic_2d, dic_3d, action, opt.twoD_source)
        eval_input, eval_output = get_all_data(dic_2d_action,
                                               dic_3d_action,
                                               camera_frame,
                                               norm_twoD=opt.norm_twoD,
                                               input_size=input_size,
                                               output_size=output_size)
        action_dataset = dataset.PoseDataset(eval_input,
                                             eval_output,
                                             'eval',
                                             action_name=action,
                                             refine_3d=opt.refine_3d)
        action_dataset_list.append(action_dataset)
    return action_dataset_list
Example #3
0
def split_action(dic_2d, dic_3d, actions, camera_frame, opt, input_size,
                 output_size):
    """
    Build one evaluation dataset per action and return them as a list.
    """
    datasets_per_action = []
    for idx in range(len(actions)):
        current_action = actions[idx]
        # restrict the pose dictionaries to the current action
        sub_2d, sub_3d = select_action(dic_2d, dic_3d, current_action,
                                       opt.twoD_source)
        inputs, targets = get_all_data(sub_2d,
                                       sub_3d,
                                       camera_frame,
                                       norm_twoD=opt.norm_twoD,
                                       input_size=input_size,
                                       output_size=output_size)
        datasets_per_action.append(
            dataset.PoseDataset(inputs,
                                targets,
                                'eval',
                                action_name=current_action,
                                refine_3d=opt.refine_3d))
    return datasets_per_action