Code Example #1
File: speechDataset.py  Project: zhouzyhfut/AVEC2019
    def __getitem__(self, idx):
        path = self.filePaths[idx]
        labels = self.labels[idx]
        # Load the LLD feature matrix for this sample
        frames = load_features(path,
                               skip_header=self.skip_header,
                               skip_instname=self.skip_instname,
                               delim=self.delim)
        if self.noTimeStamps:
            frames = frames[:, 1:]  # drop the time-stamp column
        if self.shouldNormalize:
            frames = self.normalize(frames)
        frames = np.array(frames, dtype='f')  # cast to float32
        return frames, np.array(labels, dtype=self.labeldtype)
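This __getitem__ reads like a PyTorch-style Dataset method, so it would typically be consumed through a DataLoader. A minimal usage sketch, assuming the class is called SpeechDataset and that its constructor takes the file paths, labels and parsing flags used above (both the class name and the constructor signature are assumptions, not taken from the project):

import torch
from torch.utils.data import DataLoader

# Hypothetical construction; the real constructor arguments may differ.
dataset = SpeechDataset(filePaths, labels,
                        delim=';', skip_header=True, skip_instname=True)
loader = DataLoader(dataset, batch_size=1, shuffle=True)  # batch_size=1: sequences differ in length

for frames, label in loader:
    # frames: float32 tensor of shape (1, num_frames, num_features)
    # label:  tensor of shape (1, ...) with the dataset's label dtype
    pass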
Code Example #2
import os
import ntpath

import numpy as np

# Note: load_features and lenmin are defined elsewhere in the project.
def parse_audio_files(parent_dir):
    features = []
    labels = np.empty(0)
    for fn in os.listdir(parent_dir):
        print('process...', fn)
        feature_i = load_features(parent_dir + fn, delim=';')
        # Zero-pad files shorter than lenmin frames ...
        if feature_i.shape[0] < lenmin:
            feature_i = np.vstack([
                feature_i,
                np.zeros((lenmin - feature_i.shape[0], feature_i.shape[1]))
            ])
        feature_i = feature_i[:lenmin, :]  # ... then crop to exactly lenmin frames
        features.append(feature_i[:, 1:])  # drop the time-stamp column
        filename = ntpath.basename(fn)
        labels = np.append(labels, filename.split('-')[2])  # label is the 3rd '-'-separated field of the file name
    return np.array(features), np.array(labels, dtype=int)
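A hypothetical call of parse_audio_files (the folder name and the value of lenmin below are placeholders; load_features comes from the project's feature I/O module):

lenmin = 500  # hypothetical target number of frames per file
X, y = parse_audio_files('./audio_features/')  # parent_dir must end with a path separator
print(X.shape)  # (num_files, lenmin, num_llds)
print(y.shape)  # (num_files,)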
Code Example #3
File: generate_xbow.py  Project: wuhao1999/AVEC2018
    os.mkdir(folder_bovw)

for fn in [
        'Train_audio.csv', 'Train_visual.csv', 'Codebook_audio.txt',
        'Codebook_visual.txt'
]:
    if os.path.exists(temporary_folder + fn):
        os.remove(temporary_folder + fn)

# Create codebooks
print('Concatenating training feature files ...')
codebook_audio = temporary_folder + 'Codebook_audio.txt'
codebook_visual = temporary_folder + 'Codebook_visual.txt'

for fn in files_train:
    features = load_features(folder_audio_features + fn)
    save_features(temporary_folder + 'Train_audio.csv',
                  features,
                  append=True,
                  instname=fn)
    features = load_features(folder_visual_features + fn)
    save_features(temporary_folder + 'Train_visual.csv',
                  features,
                  append=True,
                  instname=fn)

print('Generating codebook file ' + codebook_audio)
os.system('java -jar ' + jar_openxbow + ' -B ' + codebook_audio + ' -i ' +
          temporary_folder + 'Train_audio.csv' + ' ' + openxbow_options_audio +
          ' ' + openxbow_options_codebook_audio)
print('Generating codebook file ' + codebook_visual)
Code Example #4
hop_size = 0.1  # hop size of the labels (TODO)
max_seq_len = 1768  # TODO: should be at least the maximum number of labels per file (= duration / hop_size); can be larger, as files are cropped later on (only used for allocation)

# Get all files
files = fnmatch.filter(os.listdir(folder_lld_features[0]),
                       '*.csv')  # filenames are the same for all modalities
files.sort()

# Generate files with functionals for all modalities and train/devel/test
for m in range(0, len(folder_lld_features)):
    if not os.path.exists(folder_functionals[m]):
        os.mkdir(folder_functionals[m])

    for fn in files:
        X = load_features(folder_lld_features[m] + fn,
                          skip_header=True,
                          skip_instname=True,
                          delim=';')
        print(fn)
        print(X.shape)
        num_llds = X.shape[1] - 1  # (time stamp is not considered as feature)
        X_func = np.zeros(
            (max_seq_len, num_llds * 2))  # mean + stddev for each LLD
        window_size_half = int(window_size * fps[m] / 2)

        time_stamps_new = np.empty((max_seq_len, 1))
        for t in range(0, max_seq_len):
            t_orig = int(t * fps[m] * hop_size)
            min_orig = max(0, t_orig - window_size_half)
            max_orig = min(X.shape[0], t_orig + window_size_half + 1)
            # X can be shorter than max_seq_len frames; skip windows beyond its end
            if min_orig < max_orig and t_orig <= X.shape[0]:
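The snippet is cut off right after this if. Based on the "mean + stddev for each LLD" comment above, the body presumably looks roughly like the following (a sketch under that assumption, not the project's verbatim code):

# Inside the if above: mean and standard deviation of each LLD over the current window
# (column 0 of X is the time stamp, so LLDs start at column 1).
X_win = X[min_orig:max_orig, 1:]
X_func[t, :num_llds] = np.mean(X_win, axis=0)
X_func[t, num_llds:] = np.std(X_win, axis=0)
time_stamps_new[t] = t * hop_size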
Code Example #5
for m in range(0, len(folder_lld_features)):
    folder_in = folder_lld_features[m]
    folder_out = folder_xbow[m]
    xbow_options = openxbow_options[m]
    xbow_codebook = openxbow_options_codebook[m]
    training_files = temporary_folder + train_concat[m]
    codebook = temporary_folder + codebooks[m]

    if not os.path.exists(folder_xbow[m]):
        os.mkdir(folder_xbow[m])

    # Create codebooks
    if len(files_codebook) > 0:
        print('Concatenating feature files for codebook ...')
        for fn in files_codebook:
            features = load_features(folder_in + fn)
            save_features(training_files, features, append=True, instname=fn)

        print('Generating codebook file ' + codebook)
        os.system('java -d64 -Xmx8g -jar ' + jar_openxbow + ' -B ' + codebook +
                  ' -i ' + training_files + ' ' + xbow_options + ' ' +
                  xbow_codebook)

    # Generate XBOW files
    print('Generating BoAW and BoVW feature files for all files ...')
    for fn in all_files:
        print('Generating BoAW and BoVW for file ' + fn + ' ...')
        os.system('java -d64 -Xmx8g -jar ' + jar_openxbow + ' -b ' + codebook +
                  ' -i ' + folder_in + fn + ' -o ' + folder_out + fn + ' ' +
                  xbow_options)
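The openXBOW calls above are built by string concatenation and passed to os.system. A small alternative sketch of the same call using subprocess with an explicit argument list (more robust to spaces in paths; the obsolete -d64 flag is omitted, and xbow_options is assumed to be a plain string of space-separated options, as in the snippet):

import subprocess

cmd = ['java', '-Xmx8g', '-jar', jar_openxbow,
       '-b', codebook,
       '-i', folder_in + fn,
       '-o', folder_out + fn] + xbow_options.split()
subprocess.run(cmd, check=True)  # raises CalledProcessError if openXBOW fails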
Code Example #6
    os.mkdir(folder_output)

# Header for visual feature files (FAUs)
header_output_file = 'name;frameTime;confidence;AU01_r;AU02_r;AU04_r;AU05_r;AU06_r;AU07_r;AU09_r;AU10_r;AU12_r;AU14_r;AU15_r;AU17_r;AU20_r;AU23_r;AU25_r;AU26_r;AU45_r'  # 17 AU intensities

for fn in os.listdir(folder_data):
    infilename = folder_data + fn
    instname = os.path.splitext(fn)[0]
    outfilename = folder_output + instname + '.csv'
    openface_call = exe_openface + ' ' + conf_openface + ' -f ' + infilename + ' -out_dir ' + folder_output
    os.system(openface_call)
    time.sleep(0.01)

    # Re-format files (as required by, e.g., openXBOW)
    features = load_features(outfilename,
                             skip_header=True,
                             skip_instname=False,
                             delim=',')
    features = np.delete(features, [
        0, 1, 4, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
        37, 38, 39
    ], 1)  # removing: frame, face_id, confidence, FAU present (c, 1/0)
    save_features(outfilename,
                  features,
                  append=False,
                  instname=instname,
                  header=header_output_file,
                  delim=';',
                  precision=3)

    # Remove details file
    os.remove(folder_output + instname + '_of_details.txt')