コード例 #1
0
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Build one NTU skeleton dataset split and write it to disk.

    Scans every ``.skeleton`` file in *data_path*, keeps the samples that
    belong to the requested ``benchmark``/``part`` split, then writes
    ``{part}_label.pkl`` (sample names + 0-indexed labels) and
    ``{part}_data_joint.npy`` (an (N, C, T, V, M) float32 tensor) into
    *out_path*.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Directory the label pickle and data tensor are saved to.
        ignored_sample_path: Optional text file listing sample names (one
            per line, without extension) to skip.
        benchmark: ``'xview'`` (split by camera) or ``'xsub'`` (by subject).
        part: ``'train'`` or ``'val'``.

    Raises:
        ValueError: If ``benchmark`` or ``part`` is not a recognized value.
    """
    # PEP 8: compare with None using `is not`, never `!=`.
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = {
                line.strip() + '.skeleton' for line in f
            }  # set: O(1) membership tests in the loop below
    else:
        ignored_samples = set()

    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Filenames look like S001C002P003R002A013.skeleton: the three
        # digits after each marker letter encode the numeric id.
        action_class = int(filename[filename.find('A') + 1:filename.find('A') +
                                    4])
        subject_id = int(filename[filename.find('P') + 1:filename.find('P') +
                                  4])
        camera_id = int(filename[filename.find('C') + 1:filename.find('C') +
                                 4])

        if benchmark == 'xview':
            istraining = (camera_id in training_cameras)
        elif benchmark == 'xsub':
            istraining = (subject_id in training_subjects)
        else:
            raise ValueError(
                'Invalid benchmark provided: {}'.format(benchmark))

        if part == 'train':
            issample = istraining
        elif part == 'val':
            issample = not istraining
        else:
            raise ValueError('Invalid data part provided: {}'.format(part))

        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels become 0-indexed

    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # Create data tensor with shape (# examples (N), C, T, V, M)
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true),
                  dtype=np.float32)

    # Fill in the data tensor `fp` one training example at a time; only the
    # first data.shape[1] frames are written, the rest stay zero-padded.
    for i, s in enumerate(tqdm(sample_name)):
        data = read_xyz(os.path.join(data_path, s),
                        max_body=max_body_kinect,
                        num_joint=num_joint)
        fp[i, :, :data.shape[1], :, :] = data

    fp = pre_normalization(fp)
    np.save('{}/{}_data_joint.npy'.format(out_path, part), fp)
コード例 #2
0
ファイル: ntu120_gendata.py プロジェクト: lqjlqj1997/ST-AAE
def gendata(file_list, out_path, ignored_sample_path, benchmark, part):
    """Collect the samples of one benchmark/part split and write them out.

    Produces ``{part}_label.pkl`` (full sample paths + 0-indexed labels)
    and ``{part}_data_joint.npy`` (an (N, C, T, V, M) float32 tensor)
    inside *out_path*.

    Args:
        file_list: Iterable of ``(folder, filename)`` pairs to consider.
        out_path: Output directory.
        ignored_sample_path: Optional text file of sample names to skip.
        benchmark: ``'xsub'`` (split by subject) or ``'xset'`` (by setup).
        part: ``'train'`` or ``'val'``.

    Raises:
        ValueError: On an unrecognized ``benchmark`` or ``part``.
    """
    ignored_samples = []
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as handle:
            ignored_samples = [
                name.strip() + '.skeleton' for name in handle.readlines()
            ]

    sample_name = []  # kept for symmetry with sibling generators; unused
    sample_label = []
    sample_paths = []
    for folder, filename in sorted(file_list):
        if filename in ignored_samples:
            continue

        path = os.path.join(folder, filename)
        # The 3 digits after 'S'/'P'/'A' encode setup, subject and action.
        ids = {}
        for marker in ('S', 'P', 'A'):
            pos = filename.find(marker)
            ids[marker] = int(filename[(pos + 1):(pos + 4)])
        setup_id = ids['S']
        subject_id = ids['P']
        action_class = ids['A']

        if benchmark == 'xsub':
            istraining = subject_id in training_subjects
        elif benchmark == 'xset':
            istraining = setup_id in training_setups
        else:
            raise ValueError(f'Unsupported benchmark: {benchmark}')

        if part == 'train':
            keep = istraining
        elif part == 'val':
            keep = not istraining
        else:
            raise ValueError(f'Unsupported dataset part: {part}')

        if keep:
            sample_paths.append(path)
            sample_label.append(action_class - 1)  # to 0-indexed

    # Save labels
    with open(f'{out_path}/{part}_label.pkl', 'wb') as handle:
        pickle.dump((sample_paths, list(sample_label)), handle)

    # Allocate the (N, C, T, V, M) data tensor.
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true),
                  dtype=np.float32)

    # Copy each (C, T, V, M) sample into its row; frames beyond the
    # sample's own length stay zero-padded.
    for idx, sample_path in enumerate(tqdm(sample_paths, dynamic_ncols=True)):
        data = read_xyz(sample_path, max_body=max_body_kinect,
                        num_joint=num_joint)
        fp[idx, :, 0:data.shape[1], :, :] = data

    # Normalize, then save this split's input tensor.
    fp = pre_normalization(fp)
    np.save('{}/{}_data_joint.npy'.format(out_path, part), fp)
コード例 #3
0
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Build a camera-based split restricted to action classes 1-60.

    Samples whose action class exceeds 60 are skipped. Recordings from
    camera 4 form the validation set; every other camera is training
    (``benchmark`` is accepted for interface parity but not consulted).
    Writes ``{part}_label.pkl`` and ``{part}_data_joint.npy`` to
    *out_path*.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Output directory.
        ignored_sample_path: Optional text file of sample names to skip.
        benchmark: Unused; kept for signature compatibility with siblings.
        part: ``'train'`` or ``'val'``.

    Raises:
        ValueError: If ``part`` is not ``'train'`` or ``'val'``.
    """
    # PEP 8: compare with None using `is not`, never `!=`.
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [
                line.strip() + '.skeleton' for line in f
            ]
    else:
        ignored_samples = []
    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Three digits after 'A' encode the action class id.
        action_class = int(filename[filename.find('A') + 1:filename.find('A') +
                                    4])

        # Keep only the original 60 single/two-person classes.
        if action_class > 60:
            continue
        # NOTE(review): an earlier experiment remapped classes via
        # `action_class -= 49` (55->6, 56->7, 58->9 for hand_shake /
        # pass_object / hug); intentionally disabled here.

        camera_id = int(filename[filename.find('C') + 1:filename.find('C') +
                                 4])

        # Camera 4 is held out for validation; the rest train.
        istraining = (camera_id != 4)

        if part == 'train':
            issample = istraining
        elif part == 'val':
            issample = not istraining
        else:
            raise ValueError('Invalid data part provided: {}'.format(part))

        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels become 0-indexed

    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # Data tensor of shape (N, C, T, V, M), zero-padded along time.
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true),
                  dtype=np.float32)

    for i, s in enumerate(tqdm(sample_name)):
        data = read_xyz(os.path.join(data_path, s),
                        max_body=max_body_kinect,
                        num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data

    fp = pre_normalization(fp)
    np.save('{}/{}_data_joint.npy'.format(out_path, part), fp)
コード例 #4
0
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Build one dataset split using the module-level ``extract`` parser.

    Uses ``extract(filename)`` to obtain per-sample metadata (indexing
    observed here: ``info[1]`` camera, ``info[2]`` subject, ``info[4]``
    action class). Writes ``{part}_label.pkl`` — note it stores the full
    ``info`` tuples rather than filenames — and ``{part}_data_joint.npy``
    to *out_path*.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Output directory.
        ignored_sample_path: Optional text file of sample names to skip.
        benchmark: ``'xview'`` (by camera) or ``'xsub'`` (by subject).
        part: ``'train'`` or ``'test'`` (this variant uses 'test', not
            'val').

    Raises:
        ValueError: If ``benchmark`` or ``part`` is not a recognized value.
    """
    # PEP 8: compare with None using `is not`, never `!=`.
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [
                line.strip() + '.skeleton' for line in f
            ]
    else:
        ignored_samples = []
    sample_name = []
    sample_label = []
    sample_info = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        info = extract(filename)

        if benchmark == 'xview':
            istraining = (info[1] in training_cameras)
        elif benchmark == 'xsub':
            istraining = (info[2] in training_subjects)
        else:
            # Raise with a message instead of a bare ValueError().
            raise ValueError('Invalid benchmark provided: {}'.format(benchmark))

        if part == 'train':
            issample = istraining
        elif part == 'test':
            issample = not istraining
        else:
            raise ValueError('Invalid data part provided: {}'.format(part))

        if issample:
            sample_name.append(filename)
            sample_label.append(info[4] - 1)  # labels become 0-indexed
            sample_info.append(info)

    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump((sample_info, list(sample_label)), f)

    # Data tensor of shape (N, C, T, V, M), zero-padded along time.
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true),
                  dtype=np.float32)

    for i, s in enumerate(tqdm(sample_name)):
        data = read_xyz(os.path.join(data_path, s),
                        max_body=max_body_kinect,
                        num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data

    fp = pre_normalization(fp)
    np.save('{}/{}_data_joint.npy'.format(out_path, part), fp)
コード例 #5
0
def gendata(data_path, out_path, ignored_sample_path=None, benchmark='xsub', set_name='val'):
    """Build one dataset split and save it as the padded joint tensor.

    Writes ``{set_name}_label.pkl`` and ``{set_name}_data_joint_pad.npy``
    to *out_path*. Removed the leftover per-sample debug ``print`` calls
    that flooded stdout inside the scan and fill loops.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Output directory.
        ignored_sample_path: Optional text file of sample names to skip.
        benchmark: ``'xview'`` (by camera) or ``'xsub'`` (by subject).
        set_name: ``'train'`` or ``'val'``.

    Raises:
        ValueError: If ``benchmark`` or ``set_name`` is not recognized.
    """
    # PEP 8: compare with None using `is not`, never `!=`.
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [line.strip() + '.skeleton' for line in f]
    else:
        ignored_samples = []
    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Three digits after each marker letter encode the numeric id.
        action_class = int(filename[filename.find('A') + 1:filename.find('A') + 4])
        subject_id = int(filename[filename.find('P') + 1:filename.find('P') + 4])
        camera_id = int(filename[filename.find('C') + 1:filename.find('C') + 4])

        if benchmark == 'xview':
            istraining = (camera_id in training_cameras)
        elif benchmark == 'xsub':
            istraining = (subject_id in training_subjects)
        else:
            # Raise with a message instead of a bare ValueError().
            raise ValueError('Invalid benchmark provided: {}'.format(benchmark))

        if set_name == 'train':
            issample = istraining
        elif set_name == 'val':
            issample = not istraining
        else:
            raise ValueError('Invalid set name provided: {}'.format(set_name))

        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels become 0-indexed

    with open('{}/{}_label.pkl'.format(out_path, set_name), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # Data tensor of shape (N, C, T, V, M), zero-padded along time.
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true), dtype=np.float32)

    for i, s in enumerate(tqdm(sample_name)):
        data = read_xyz(os.path.join(data_path, s), max_body=max_body_kinect, num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data

    fp = pre_normalization(fp)
    np.save('{}/{}_data_joint_pad.npy'.format(out_path, set_name), fp)
コード例 #6
0
def gendata(data_path,
            out_path,
            ignored_sample_path=None,
            benchmark='xview',
            part='eval'):
    """Build a split over the NTU-120 extension classes (original ids > 49).

    Action ids are remapped with ``action_class -= 49`` (so original
    50-65 become 1-16); samples with a remapped class < 1 or a setup id
    above 5 are skipped. Writes ``{part}_label.pkl`` and
    ``{part}_data_joint.npy`` to *out_path*.

    Args:
        data_path: Directory containing the raw ``.skeleton`` files.
        out_path: Output directory.
        ignored_sample_path: Optional text file of sample names to skip.
        benchmark: ``'xview'`` (by camera) or ``'xsub'`` (by subject).
        part: ``'train'`` or ``'val'``.

    Raises:
        ValueError: If ``benchmark`` or ``part`` is not a recognized value.
    """
    # PEP 8: compare with None using `is not`, never `!=`.
    if ignored_sample_path is not None:
        with open(ignored_sample_path, 'r') as f:
            ignored_samples = [
                line.strip() + '.skeleton' for line in f
            ]
    else:
        ignored_samples = []
    sample_name = []
    sample_label = []
    for filename in os.listdir(data_path):
        if filename in ignored_samples:
            continue
        # Three digits after each marker letter encode the numeric id.
        action_class = int(filename[filename.find('A') + 1:filename.find('A') +
                                    4])
        subject_id = int(filename[filename.find('P') + 1:filename.find('P') +
                                  4])
        camera_id = int(filename[filename.find('C') + 1:filename.find('C') +
                                 4])

        ############################################################
        # Remap to the extension range: 50->1, ..., 65->16.
        action_class -= 49
        if action_class < 1:
            continue
        set_up = int(filename[filename.find('S') + 1:filename.find('S') + 4])
        # BUGFIX: the original `break` assumed os.listdir() returns names
        # sorted by setup id, which is not guaranteed — `continue` skips
        # set_up > 5 regardless of listing order.
        if set_up > 5:
            continue
        ############################################################

        if benchmark == 'xview':
            istraining = (camera_id in training_cameras)
        elif benchmark == 'xsub':
            istraining = (subject_id in training_subjects)
        else:
            # Raise with a message instead of a bare ValueError().
            raise ValueError('Invalid benchmark provided: {}'.format(benchmark))

        if part == 'train':
            issample = istraining
        elif part == 'val':
            issample = not istraining
        else:
            raise ValueError('Invalid data part provided: {}'.format(part))

        if issample:
            sample_name.append(filename)
            sample_label.append(action_class - 1)  # labels become 0-indexed

    with open('{}/{}_label.pkl'.format(out_path, part), 'wb') as f:
        pickle.dump((sample_name, list(sample_label)), f)

    # Data tensor of shape (N, C, T, V, M), zero-padded along time.
    fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true),
                  dtype=np.float32)

    for i, s in enumerate(tqdm(sample_name)):
        data = read_xyz(os.path.join(data_path, s),
                        max_body=max_body_kinect,
                        num_joint=num_joint)
        fp[i, :, 0:data.shape[1], :, :] = data  # (C, T, V, M) into row i

    fp = pre_normalization(fp)  # see preprocess.py
    np.save('{}/{}_data_joint.npy'.format(out_path, part), fp)