Beispiel #1
0
def process_aggreagtes() -> None:
    """Rewrite the aggregates CSV, splicing dataset metadata into each row.

    Reads AGGREGATES_PATH line by line (skipping the header), looks each
    row's file path up in the combined dataset index, and writes an
    enriched copy to AGGREGATES_COPY_PATH with
    emotion/dataset/actor/gender/intensity/age columns inserted after the
    path column. Rows whose path does not end in 'wav' are dropped.

    NOTE(review): the name is misspelled ("aggregates") but kept as-is
    because callers use this exact name.
    """
    # Index every known sample by its absolute file path.
    sets = {
        f.file_path: f
        for ds in (datasets.get_emovo(), datasets.get_ravdess(),
                   datasets.get_cafe(), datasets.get_berlin(),
                   datasets.get_tess())
        for f in ds
    }

    # Length of the hard-coded learning-dir prefix stripped from each path.
    # The raw string ends in an escaped backslash, hence the -1 correction.
    prefix_len = len(r'C:\Projects\Emotions\learning\\') - 1

    with open(AGGREGATES_PATH) as f, open(AGGREGATES_COPY_PATH, 'w+') as new:
        next(f)  # skip the header line

        for line in f:
            index = line.index(';')
            path = line[:index].strip("'")
            if not path.endswith('wav'):
                continue

            info = sets[path]
            new_line = (
                f"'{path[prefix_len:]}';"
                f"{info.emotion.value};{info.dataset};{info.actor};"
                f"{info.gender.value};{info.intensity.value};{info.age.value}"
                + line[index:]
            )
            new.write(new_line)
Beispiel #2
0
        new_header = ';'.join(header[:1] + [c for c, _ in new_columns] +
                              header[1:])
        new.write(new_header)

        for line in old_iter:
            second_column_index = line.index(';')
            path = line[:second_column_index].strip("'")
            if not path.endswith('wav'):
                continue

            info = sets[path]
            new_line = (f"'{path[len(datasets.BASE_DIR) + 1:]}';" + ';'.join(
                str(getter(info))
                for _, getter in new_columns) + line[second_column_index:])

            new.write(new_line)


if __name__ == '__main__':
    # Best-effort: mirror the 'data' directory tree before writing features.
    # Failures are deliberately swallowed — presumably the tree may already
    # exist; TODO(review) confirm which exception types are expected here.
    try:
        mirror_dir_tree('data')
    except Exception:
        pass

    # Materialize every sample from all five corpora into a single tuple
    # so it can be consumed twice (feature extraction + aggregates pass).
    ds = tuple(
        chain(datasets.get_emovo(), datasets.get_ravdess(),
              datasets.get_cafe(), datasets.get_berlin(), datasets.get_tess()))

    # Extract features with the module-level CONFIG, then rebuild the
    # aggregates file with the metadata for these samples.
    get_features(ds, CONFIG)
    process_aggreagtes(ds)
Beispiel #3
0
            importances[indices],
            color="r",
            yerr=std[indices],
            align="center")
    plt.xticks(range(X.shape[1]), [columns[i] for i in indices],
               rotation='vertical')
    plt.xlim([-1, X.shape[1]])
    plt.show()


if __name__ == '__main__':
    df = pd.DataFrame()

    for info in filter(
            filter_function,
            chain(get_ravdess(), get_emovo(), get_berlin(), get_cafe())):
        features = pd.read_csv(info.features_path, sep=';').iloc[::7, 2:]
        features.insert(0, 'emotion', info.emotion.value, True)

        df = df.append(features)

    data_train, data_test, answer_train, answer_test = train_test_split(
        select_features(df),
        df['emotion'],
        test_size=0.2,
        shuffle=True,
        random_state=20)

    forest = RandomForestClassifier(n_jobs=-1)
    forest_params = dict(
        n_estimators=[1000, 5000],
Beispiel #4
0
            path = line[:index].strip("'")
            if not path.endswith('wav'):
                continue

            info = sets[path]
            new_line = (
                # sry for this
                "'" + path[len(r'C:\Projects\Emotions\learning\\') - 1:] +
                f"';{str(info.emotion.value)};{info.dataset};{info.actor};{info.gender.value};{info.intensity.value};{info.age.value}" +
                line[index:]
            )

            new.write(new_line)


if __name__ == '__main__':
    # Best-effort: mirror the 'data' directory tree before extraction.
    # Failures are deliberately swallowed — presumably the tree may already
    # exist; TODO(review) confirm which exception types are expected here.
    try:
        mirror_dir_tree('data')
    except Exception:
        pass

    # Run EMO2010 feature extraction over each corpus in turn, printing
    # each result. Loop replaces five copy-pasted print(...) calls; the
    # call order (emovo, ravdess, cafe, berlin, tess) is unchanged.
    for dataset in (datasets.get_emovo(), datasets.get_ravdess(),
                    datasets.get_cafe(), datasets.get_berlin(),
                    datasets.get_tess()):
        print(get_features(dataset, EMO2010))

    # Rebuild the aggregates file with per-sample metadata columns.
    process_aggreagtes()