def main():
    # GMM, datautil and get_training_data_fpaths come from the surrounding project.
    # Adapt one speaker-dependent GMM per enrolled speaker from a shared
    # 32-mixture universal background model (UBM), then dump each model to disk.
    nr_person = 50
    fpaths = get_training_data_fpaths()
    X_train, y_train, X_test, y_test = datautil.read_data(fpaths, nr_person)
    ubm = GMM.load("model/ubm-32.model")
    for x, y in zip(X_train, y_train):
        gmm = GMM(concurrency=8, threshold=0.01, nr_iteration=100, verbosity=1)
        gmm.fit(x, ubm=ubm)  # adapt from the UBM on this speaker's features
        gmm.dump("model/" + y + ".32.model")
Example #3
    # Fragment (presumably the tail of test_ubm_var_channel()): score each
    # imposter utterance against the enrolled speakers; all should be rejected.
    for imposter_audio_file in map(lambda x: 'test-{}.wav'.format(x),
                                   range(5)):
        fs, signal = wavfile.read(imposter_audio_file)
        signal = monotize_signal(signal)
        imposter_x = mix_feature((fs, signal))
        print(gmmset.predict_one_with_rejection(imposter_x))


test_ubm_var_channel()
import sys
sys.exit(0)

# Unreachable scratch code (sys.exit(0) above): adapt and dump a single
# speaker model from the UBM for manual experiments.
audio_file = 'test-data/corpus.silence-removed/Style_Reading/f_001_03.wav'

fs, signal = wavfile.read(audio_file)
signal = monotize_signal(signal)
X = mix_feature((fs, signal))

ubm = GMM.load('model/ubm.mixture-32.person-20.immature.model')
gmm = GMM(32, verbosity=1)

X = X[:1000]  # use only the first 1000 feature frames
gmm.fit(X, ubm=ubm)  # adapt this speaker's model from the UBM
gmm.dump('xinyu.model')

# vim: foldmethod=marker
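
The scratch code above adapts and dumps a single speaker model, while Example #3 scores imposters through gmmset.predict_one_with_rejection. Below is a hedged sketch of how the per-speaker models dumped by main() might be used for closed-set identification. Only GMM.load, fit, and dump appear in these snippets, so the gmm.score(test_x) call is an assumption about the scoring API, and identify is a hypothetical helper.

# Hedged sketch: closed-set speaker identification against the dumped models.
# Assumption: GMM (the project class used above) exposes a score(X) method that
# returns a (log-)likelihood for a whole feature matrix; this is not shown above.
def identify(test_x, speaker_labels):
    best_label, best_score = None, float('-inf')
    for label in speaker_labels:
        gmm = GMM.load('model/' + label + '.32.model')  # models written by main()
        score = gmm.score(test_x)  # hypothetical scoring call
        if score > best_score:
            best_label, best_score = label, score
    return best_label

# Hypothetical usage:
# print(identify(mix_feature((fs, signal)), y_train))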