Example #1
def create_z_data( par ):
    """
    Pipeline for creating the 1D (Z-coordinate) targets from 3D ground-truth poses
    """

    # Load data
    train_set, train_keypts, _ = load.load_3D( par['data_dir'], subjects=par['train_subjects'], actions=par['actions'] )
    test_set, test_keypts, _   = load.load_3D( par['data_dir'], subjects=par['test_subjects'],  actions=par['actions'] )

    # rotate to align with 2D and keep the Z coordinate
    train_set = transform.Z_coord( train_set )
    test_set  = transform.Z_coord( test_set )

    # anchor to root points
    train_set, _ = utils.anchor_to_root( train_set, par['roots'], par['target_sets'], par['out_dim'] )
    test_set, offset = utils.anchor_to_root( test_set, par['roots'], par['target_sets'], par['out_dim'] )

    # Standardize each dimension independently
    mean, std = stat.normalization_stats( train_set )
    train_set = stat.normalize( train_set, mean, std )
    test_set  = stat.normalize( test_set,  mean, std )

    # select coordinates to be predicted and return them as 'targets_1d'
    train_set, _ = utils.remove_roots( train_set, par['target_sets'], par['out_dim'] )
    test_set, targets_1d = utils.remove_roots( test_set, par['target_sets'], par['out_dim'] )

    # keep only the selected target coordinates in the keypoint dictionaries
    for key in train_keypts.keys():
        train_keypts[key] = train_keypts[key][:, targets_1d]
    for key in test_keypts.keys():
        test_keypts[key] = test_keypts[key][:, targets_1d]

    return train_set, test_set, mean, std, train_keypts, test_keypts, targets_1d, offset
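
The `stat.normalization_stats` / `stat.normalize` helpers used above are not defined in these examples. Below is a minimal numpy sketch, under the assumption that the pose data is a dict of (frames, dims) arrays, of what computing the statistics on the training split only and then applying them to both splits typically looks like; the `_sketch` function names and the epsilon guard are illustrative, not the repository's actual implementation.

import numpy as np

def normalization_stats_sketch(train_dict, eps=1e-8):
    """Hypothetical stand-in: per-dimension mean/std over all training frames."""
    all_frames = np.vstack(list(train_dict.values()))    # (total_frames, n_dims)
    mean = all_frames.mean(axis=0)
    std = all_frames.std(axis=0)
    std[std < eps] = 1.0                                  # guard against constant dimensions
    return mean, std

def normalize_sketch(data_dict, mean, std):
    """Hypothetical stand-in: z-score every dimension independently."""
    return {k: (v - mean) / std for k, v in data_dict.items()}

# Toy usage with data shaped like the loaded pose dictionaries
train = {('subj1', 'walk'): np.random.rand(100, 30)}
test = {('subj2', 'walk'): np.random.rand(80, 30)}
mean, std = normalization_stats_sketch(train)
train = normalize_sketch(train, mean, std)
test = normalize_sketch(test, mean, std)                  # test reuses the train statistics
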
def read_2d_predictions(par):
    """
    Pipeline for processing 2D data (stacked hourglass predictions)
    """

    # Load data
    train = load.load_2D(par['data_dir'],
                         par,
                         cam_id=par['cam_id'],
                         subjects=par['train_subjects'],
                         actions=par['actions'])
    test = load.load_2D(par['data_dir'],
                        par,
                        cam_id=par['cam_id'],
                        subjects=par['test_subjects'],
                        actions=par['actions'])

    # anchor points to body-coxa (to predict leg joints wrt body-coxas)
    train, _ = utils.anchor_to_root(train, par['roots'], par['target_sets'],
                                    par['in_dim'])
    test, offset = utils.anchor_to_root(test, par['roots'], par['target_sets'],
                                        par['in_dim'])

    # Standardize each dimension independently
    mean, std = stat.normalization_stats(train)
    train = stat.normalize(train, mean, std)
    test = stat.normalize(test, mean, std)

    # select coordinates to be predicted and return them as 'targets'
    train, _ = utils.remove_roots(train, par['target_sets'], par['in_dim'])
    test, targets = utils.remove_roots(test, par['target_sets'], par['in_dim'])

    return train, test, mean, std, targets
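
`utils.anchor_to_root` and `utils.remove_roots` are likewise only called, not shown. One plausible reading of the pattern, sketched below for flattened (frames, joints*dim) arrays, is: express each target joint relative to its root joint, keep the root positions as the `offset` for later un-anchoring, then drop the root columns and return the surviving column indices as `targets`. The signatures, names, and array layout here are assumptions for illustration.

import numpy as np

def anchor_to_root_sketch(poses, roots, target_sets, dim):
    """Hypothetical stand-in: subtract each root joint from its target joints."""
    anchored = poses.copy()
    offsets = {}
    for root, targets in zip(roots, target_sets):
        root_cols = slice(root * dim, root * dim + dim)
        offsets[root] = poses[:, root_cols]
        for t in targets:
            anchored[:, t * dim:t * dim + dim] -= poses[:, root_cols]
    return anchored, offsets

def remove_roots_sketch(poses, target_sets, dim):
    """Hypothetical stand-in: drop root columns, return the kept column indices."""
    keep_joints = sorted(j for ts in target_sets for j in ts)
    keep_cols = np.concatenate([np.arange(j * dim, (j + 1) * dim) for j in keep_joints])
    return poses[:, keep_cols], keep_cols

# Toy usage: 5 joints in 2D, joint 0 is the root of joints 1 and 2
poses = np.random.rand(10, 5 * 2)
anchored, offset = anchor_to_root_sketch(poses, roots=[0], target_sets=[[1, 2]], dim=2)
reduced, targets = remove_roots_sketch(anchored, target_sets=[[1, 2]], dim=2)
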
Example #3
def create_xy_data( par ):
    """
    Creates 2D poses by projecting the 3D poses onto the ventral (XY) view
    """

    # Load data
    train_set, _, _ = load.load_3D( par['data_dir'], subjects=par['train_subjects'], actions=par['actions'] )
    test_set,  _, _ = load.load_3D( par['data_dir'], subjects=par['test_subjects'],  actions=par['actions'] )

    # project data to ventral view
    train_set = transform.XY_coord( train_set )
    test_set  = transform.XY_coord( test_set )

    # anchor to root points
    train_set, _ = utils.anchor_to_root( train_set, par['roots'], par['target_sets'], par['in_dim'] )
    test_set, offset = utils.anchor_to_root( test_set, par['roots'], par['target_sets'], par['in_dim'] )

    # Standardize each dimension independently
    mean, std = stat.normalization_stats( train_set )
    train_set = stat.normalize( train_set, mean, std )
    test_set  = stat.normalize( test_set,  mean, std )

    # select coordinates to be predicted and return them as 'targets_2d'
    train_set, _ = utils.remove_roots( train_set, par['target_sets'], par['in_dim'] )
    test_set, targets_2d = utils.remove_roots( test_set, par['target_sets'], par['in_dim'] )

    return train_set, test_set, mean, std, targets_2d, offset
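
`transform.XY_coord` is described only by the comment "project data to ventral view". A hedged sketch of such a projection, assuming each joint is stored as a flattened (x, y, z) triplet, simply keeps the x and y components of every joint; the function name and layout are illustrative assumptions.

import numpy as np

def xy_coord_sketch(poses_3d):
    """Hypothetical stand-in: orthographic projection to the ventral (XY) plane.
    poses_3d is (frames, n_joints*3), flattened as x0, y0, z0, x1, y1, z1, ..."""
    n_frames, n_cols = poses_3d.shape
    joints = poses_3d.reshape(n_frames, n_cols // 3, 3)
    return joints[:, :, :2].reshape(n_frames, -1)          # keep x and y of every joint

# Toy usage: 4 joints in 3D become 4 joints in 2D
poses_3d = np.random.rand(10, 4 * 3)
poses_2d = xy_coord_sketch(poses_3d)                       # shape (10, 8)
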
def read_3d_data(par):
    """
    Pipeline for processing 3D ground-truth data
    """

    # Load data
    train, _, rcams_train = load.load_3D(par['data_dir'],
                                         par,
                                         cam_id=par['cam_id'],
                                         subjects=par['train_subjects'],
                                         actions=par['actions'])
    test, _, rcams_test = load.load_3D(par['data_dir'],
                                       par,
                                       cam_id=par['cam_id'],
                                       subjects=par['test_subjects'],
                                       actions=par['actions'])

    # transform to camera coordinates
    train = transform_frame(train, rcams_train)
    test = transform_frame(test, rcams_test)

    # anchor points to body-coxa (to predict leg joints wrt body-coxas)
    train, _ = utils.anchor_to_root(train, par['roots'], par['target_sets'],
                                    par['out_dim'])
    test, offset = utils.anchor_to_root(test, par['roots'], par['target_sets'],
                                        par['out_dim'])

    # Standardize each dimension independently
    mean, std = stat.normalization_stats(train)
    train = stat.normalize(train, mean, std)
    test = stat.normalize(test, mean, std)

    # select coordinates to be predicted and return them as 'targets_3d'
    train, _ = utils.remove_roots(train, par['target_sets'], par['out_dim'])
    test, targets_3d = utils.remove_roots(test, par['target_sets'],
                                          par['out_dim'])

    return train, test, mean, std, targets_3d, rcams_test, offset
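
`transform_frame` converts world-frame poses into each camera's coordinate frame using the loaded camera parameters (`rcams_train` / `rcams_test`). Its implementation is not shown here; a common form of that operation, given as an assumption-laden sketch, applies P_cam = R (P_world - T) per joint with rotation R and camera position T. The function name, signature, and parametrization are illustrative, not the repository's actual code.

import numpy as np

def world_to_camera_sketch(poses_world, R, T):
    """Hypothetical stand-in for a world-to-camera transform.
    poses_world is (frames, n_joints*3); R is a (3, 3) rotation; T is the (3,) camera position.
    Applies P_cam = R @ (P_world - T) to every joint."""
    n_frames, n_cols = poses_world.shape
    joints = poses_world.reshape(n_frames, n_cols // 3, 3)
    cam = (joints - T) @ R.T                               # row-vector form of R @ (P - T)
    return cam.reshape(n_frames, -1)

# Toy usage: identity rotation and a camera at the origin leave the poses unchanged
poses = np.random.rand(5, 6 * 3)
assert np.allclose(world_to_camera_sketch(poses, R=np.eye(3), T=np.zeros(3)), poses)
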