def train():
    """Record a probe utterance and load every enrolled speaker GMM.

    NOTE(review): this block looks truncated by the source it was scraped
    from -- ``models``, ``speakers`` and ``vector`` are computed but never
    used afterwards, and ``file_paths`` is opened but never read or closed.
    Depends on module-level helpers (``record_audio``, ``enroll_edit``,
    ``extract_features``) and imports (``warnings``, ``os``, ``cpk``,
    ``read``) defined elsewhere in the file.
    """
    # Silence library warnings (e.g. deprecation noise from the GMM code).
    warnings.filterwarnings("ignore")

    # path to training data
    source = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\Training\\"

    modelpath = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\TestSource\\"

    dest_dir = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\Tilak\\wav\\"

    test_file = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\test.txt"

    # Prefix written into the manifest ahead of each wav file name.
    prefix = "Tilak\\wav\\"

    # NOTE(review): handle is never read or closed in the visible code.
    file_paths = open(test_file, 'r')

    # Record the probe utterance and append its entry to the test manifest.
    record_audio("testVoice", dest_dir)
    enroll_edit("testVoice", test_file, prefix)

    # Collect every pickled speaker model in the training directory.
    gmm_files = [
        os.path.join(source, fname) for fname in os.listdir(source)
        if fname.endswith('.gmm')
    ]

    # load model

    models = [cpk.load(open(fname, 'rb')) for fname in gmm_files]
    # Speaker name = model file name without the ".gmm" extension.
    speakers = [fname.split("\\")[-1].split(".gmm")[0] for fname in gmm_files]

    sr, audio = read(dest_dir + "testVoice.wav")
    # extract 40 dimensional MFCC & delta MFCC features (project helper).
    vector = extract_features(audio, sr)
Example #2
0
def validate(user_name):
    """Record a test utterance for *user_name* and score it against the
    speaker's enrolled GMM, printing "legit user" or "Invalid user".

    Reads the module-level ``logavg`` baseline set by ``enroll()`` and
    relies on the project helpers ``record_audio``, ``edit_txt`` and
    ``extract_features``.
    """
    base = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\22apr2018\\"
    source = base + "Test\\"

    # Per-user test wav directory (raises if it already exists).
    os.makedirs(source + user_name + "\\wav\\")

    test_file = source + user_name + "\\test.txt"
    dir_test = source + user_name + "\\wav\\"
    gmm_dir = base + "Enroll\\" + user_name + "\\"
    prefix_test = user_name + "\\wav\\"

    # Create an empty manifest file; the original leaked this write handle.
    with open(test_file, 'w'):
        pass

    # Record the probe utterance and log it in the manifest.
    f_name1 = user_name + "test"
    record_audio(f_name1, dir_test)
    edit_txt(f_name1, test_file, prefix_test)

    # Load the enrolled Gaussian model (was leaked via an unclosed handle).
    gmm_files = gmm_dir + user_name + ".gmm"
    with open(gmm_files, 'rb') as model_fh:
        models = cpk.load(model_fh)

    # Pre-initialise so the verdict below cannot hit a NameError when the
    # manifest is unexpectedly empty (original defined this inside the loop).
    log_likelihood = np.zeros(1)

    # Score every test utterance listed in the manifest.
    with open(test_file, 'r') as file_paths1:
        for path1 in file_paths1:
            path1 = path1.strip()  # drop the trailing newline
            print(path1)
            (sr, audio) = read(source + path1)
            vector = extract_features(audio, sr)

            gmm = models  # single enrolled model for this user
            scores = np.array(gmm.score(vector))
            log_likelihood[0] = scores.sum()
            print(log_likelihood[0])

    # Verdict: test log-likelihood must be close enough to the
    # enrollment-time average (both are typically negative).
    if ((logavg / log_likelihood[0]) * 100) > 50:
        print("legit user")
    else:
        print("Invalid user")
# --- Script fragment (scraped out of a larger function) ------------------
# NOTE(review): ``user_name``, ``dest_dir_train`` and ``train_file`` are not
# defined in this view -- they presumably come from the surrounding code
# this fragment was cut from; verify before reuse.
prefix_train = user_name + "\\Train\\wav\\"

# Create the per-user test wav directory (raises if it already exists).
os.makedirs("C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\" +
            user_name + "\\Test\\wav\\")

test_file = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\test.txt"

dest_dir_test = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\" + user_name + "\\Test\\wav\\"

gmm_dir_test = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\" + user_name + "\\Test\\"

prefix_test = user_name + "\\Test\\wav\\"

print("Welcome " + user_name)

# Record three training utterances and log each one in the manifest.
for i in range(3):
    trial = "trial" + str(i + 1)
    print(trial)
    f_name = user_name + str(i + 1)
    record_audio(f_name, dest_dir_train)
    edit_txt(f_name, train_file, prefix_train)

# NOTE(review): handle is never closed in the visible code.
file_paths = open(train_file, 'r')

count = 1

features = np.asarray(())
# NOTE(review): the loop body is truncated here by the scrape.
for path in file_paths:
    path = path.strip()
    print(path)
Example #4
0
def enroll(user_name):
    """Enroll *user_name*: record three phrases, fit a 16-component
    diagonal-covariance GMM on their MFCC+delta features, pickle the model,
    and return the average log-likelihood of the enrollment utterances.

    Side effect: sets the module-level ``logavg`` baseline that
    ``validate()`` compares against. Relies on the project helpers
    ``rand_phrase``, ``record_audio``, ``edit_txt``, ``extract_features``
    and ``del_txt``.
    """
    global logavg

    base = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\22apr2018\\Enroll\\"
    source = base

    # Per-user enrollment wav directory (raises if it already exists).
    os.makedirs(base + user_name + "\\wav\\")

    train_dir = base + user_name + "\\wav\\"
    gmm_dir = base + user_name + "\\"
    prefix_train = user_name + "\\wav\\"
    enroll_file = base + user_name + "\\enroll.txt"

    # Create an empty manifest; the original leaked this write handle.
    with open(enroll_file, 'w'):
        pass

    print("Welcome " + user_name)  # was "Welcome" + name (missing space)

    NUM_TRIALS = 3  # utterances recorded per enrollment

    for i in range(NUM_TRIALS):
        print("trial" + str(i + 1))
        key_phrase = rand_phrase()
        print("Please say '" + key_phrase +
              "' once the system starts listening")
        time.sleep(2)  # give the user a moment before recording starts
        f_name = user_name + str(i + 1)
        record_audio(f_name, train_dir)
        edit_txt(f_name, enroll_file, prefix_train)

    # --- Concatenate the features of all enrollment utterances ----------
    features = np.asarray(())
    with open(enroll_file, 'r') as file_paths:
        for path in file_paths:
            path = path.strip()
            print(path)
            # read the audio
            sr, audio = read(source + path)
            # extract 40 dimensional MFCC & delta MFCC features
            vector = extract_features(audio, sr)
            if features.size == 0:
                features = vector
            else:
                features = np.vstack((features, vector))

    # --- Train and persist the speaker model -----------------------------
    # The manifest is freshly created above and holds exactly NUM_TRIALS
    # entries, so one fit replaces the original count==3 bookkeeping.
    gmm = GMM(n_components=16,
              n_iter=200,
              covariance_type='diag',
              n_init=3)
    gmm.fit(features)

    picklefile = user_name + ".gmm"
    with open(gmm_dir + picklefile, 'wb') as model_out:
        cpk.dump(gmm, model_out)
    print('+ modeling completed for speaker:', picklefile,
          " with data point = ", features.shape)

    # --- Baseline: average log-likelihood of the enrollment utterances ---
    with open(gmm_dir + picklefile, 'rb') as model_in:
        models = cpk.load(model_in)

    logsum = 0
    log_likelihood_orig = np.zeros(1)
    with open(enroll_file, 'r') as file_paths1:
        for path1 in file_paths1:
            path1 = path1.strip()
            print(path1)
            (sro, audioo) = read(source + path1)
            vector_o = extract_features(audioo, sro)
            scores = np.array(models.score(vector_o))
            log_likelihood_orig[0] = scores.sum()
            print(log_likelihood_orig[0])
            logsum = logsum + log_likelihood_orig[0]

    logavg = logsum / NUM_TRIALS
    print(logavg)
    del_txt(enroll_file)  # reset the manifest for the next enrollment
    return logavg
Example #5
0
# --- Script fragment: enroll three utterances for a typed-in user --------
# NOTE(review): ``source`` (used in the loop below) is not defined in this
# view; it presumably points at the directory the manifest paths are
# relative to -- verify against the code this fragment was cut from.
dest_dir = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\Tilak\\wav\\"

train_file = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\enroll.txt"

# Prefix written into the manifest ahead of each wav file name.
prefix = "Tilak\\wav\\"

features = np.asarray(())

user_name = input("Enter username")
print("Welcome " + user_name)
# Record three enrollment utterances and log each one in the manifest.
for i in range(3):
    trial = "trial" + str(i + 1)
    print(trial)
    f_name = user_name + str(i + 1)
    record_audio(f_name, dest_dir)
    enroll_edit(f_name, train_file, prefix)

# NOTE(review): handle is never closed in the visible code.
file_paths = open(train_file, 'r')

count = 1

# NOTE(review): loop appears truncated -- ``vector`` is computed but never
# accumulated into ``features`` in the visible code.
for path in file_paths:
    path = path.strip()
    print(path)

    # read the audio
    sr, audio = read(source + path)

    # extract 40 dimensional MFCC & delta MFCC features
    vector = extract_features(audio, sr)
Example #6
0
# --- Script fragment (scraped out of a larger script) --------------------
# NOTE(review): ``user_name``, ``dest_dir_train``, ``train_file``,
# ``prefix_train`` and ``source`` are not defined in this view; they come
# from the code this fragment was cut from -- verify before reuse.
os.makedirs("C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\" + user_name + "\\Test\\wav\\")

test_file = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\test.txt"

dest_dir_test = "C:\\Users\\Tilak1114\\Desktop\\DEVHACK2018\\18apr2018\\" + user_name + "\\Test\\wav\\"

prefix_test = user_name + "\\Test\\wav\\"

print("Welcome "+user_name)

for i in range(3):   # 3 inputs
    trial = "trial"+str(i+1)
    print(trial)
    f_name = user_name+str(i+1)
    record_audio(f_name, dest_dir_train)  # record audio
    edit_txt(f_name, train_file, prefix_train)   # write to train/enroll.txt

# NOTE(review): handle is never closed in the visible code.
file_paths = open(train_file, 'r')  # read enroll.txt

count = 1

features = np.asarray(())
# NOTE(review): the loop body is cut off mid-way by the scrape -- the
# feature-extraction statement announced by the final comment is missing.
for path in file_paths:  # iterate 3 times cuz 3 audio files
    path = path.strip()
    print(path)

    # read the audio
    sr, audio = read(source + path)

    # extract 40 dimensional MFCC & delta MFCC features
def enroll(user_name):
    """Enroll *user_name*: record five key phrases, extract MFCC+delta
    features, and fit an 8-component diagonal-covariance GMM saved as
    ``..\\Enroll\\<user>\\<user>.gmm``.

    Relies on the project helpers ``rand_phrase_train``, ``record_audio``,
    ``edit_txt`` and ``extract_features``.
    """
    source_enroll = "..\\Enroll\\"

    # Per-user enrollment wav directory (raises if it already exists).
    os.makedirs("..\\Enroll\\" + user_name + "\\wav\\")

    train_dir = "..\\Enroll\\" + user_name + "\\wav\\"
    gmm_dir = "..\\Enroll\\" + user_name + "\\"
    prefix_train = user_name + "\\wav\\"
    enroll_file = "..\\Enroll\\" + user_name + "\\enroll.txt"

    # Create an empty manifest; the original leaked this write handle.
    with open(enroll_file, 'w'):
        pass

    print("Welcome " + user_name)  # was "Welcome" + name (missing space)

    NUM_TRIALS = 5  # utterances recorded per enrollment

    for i in range(NUM_TRIALS):
        print("trial" + str(i + 1))
        key_phrase = rand_phrase_train()
        print("Please say '" + key_phrase +
              "' once the system starts listening")
        time.sleep(2)  # give the user a moment before recording starts
        f_name = user_name + str(i + 1)
        record_audio(f_name, train_dir)
        edit_txt(f_name, enroll_file, prefix_train)

    # --- Concatenate the features of all enrollment utterances ----------
    features = np.asarray(())
    with open(enroll_file, 'r') as file_paths:
        for path in file_paths:
            path = path.strip()
            print(path)
            # read the audio
            sr, audio = read(source_enroll + path)
            # extract 40 dimensional MFCC & delta MFCC features
            vector = extract_features(audio, sr)
            if features.size == 0:
                features = vector
            else:
                features = np.vstack((features, vector))

    # --- Train and persist the speaker model -----------------------------
    # The manifest is freshly created above and holds exactly NUM_TRIALS
    # entries, so one fit replaces the original count==5 bookkeeping.
    gmm = GMM(n_components=8,
              n_iter=200,
              covariance_type='diag',
              n_init=3)
    gmm.fit(features)

    picklefile = user_name + ".gmm"
    with open(gmm_dir + picklefile, 'wb') as model_out:
        cpk.dump(gmm, model_out)
    print('+ modeling completed for speaker:', picklefile,
          " with data point = ", features.shape)
def validate(user_name):
    """Record a key-phrase utterance from *user_name*, score it against
    their enrolled GMM, and print "legit user" or "Invalid user".

    Side effect: stores the test score in the module-level
    ``log_likelihood`` array. Relies on the project helpers
    ``rand_phrase_test``, ``record_audio``, ``edit_txt`` and
    ``extract_features``.
    """
    global log_likelihood

    log_likelihood = np.zeros(1)

    source_enroll = "..\\Enroll\\"
    source_test = "..\\Test\\"

    # Per-user test wav directory (raises if it already exists).
    os.makedirs("..\\Test\\" + user_name + "\\wav\\")

    test_file = "..\\Test\\" + user_name + "\\test.txt"
    dir_test = "..\\Test\\" + user_name + "\\wav\\"
    gmm_dir = "..\\Enroll\\" + user_name + "\\"
    enroll_file = "..\\Enroll\\" + user_name + "\\enroll.txt"
    prefix_test = user_name + "\\wav\\"

    # Create an empty manifest; the original leaked this write handle.
    with open(test_file, 'w'):
        pass

    # NOTE(review): emailing the OTP key-phrase (sendotph) was disabled in
    # the original; the phrase is only generated locally here.
    messg = rand_phrase_test()
    rt = input(
        "Are you ready to validate? hit 'y' if you have the key-phrase sent via email"
    )
    if rt == 'y':
        print("say your key-phrase '" + messg + "'")
        f_name1 = user_name + "test"
        record_audio(f_name1, dir_test)
        edit_txt(f_name1, test_file, prefix_test)

    # Load the enrolled Gaussian model (was leaked via an unclosed handle).
    gmm_files = gmm_dir + user_name + ".gmm"
    with open(gmm_files, 'rb') as model_fh:
        models = cpk.load(model_fh)

    # --- Baseline: average log-likelihood of the enrollment utterances ---
    logsum = 0
    log_likelihood_orig = np.zeros(1)
    with open(enroll_file, 'r') as file_paths_orig:
        for path_orig in file_paths_orig:
            path_orig = path_orig.strip()
            print(path_orig)
            (sro, audioo) = read(source_enroll + path_orig)
            vector_o = extract_features(audioo, sro)
            scores = np.array(models.score(vector_o))
            log_likelihood_orig[0] = scores.sum()
            print(log_likelihood_orig[0])
            logsum = logsum + log_likelihood_orig[0]
    logavg = logsum / 5  # enroll() records exactly 5 utterances
    print(logavg)

    # --- Score the freshly recorded test utterance(s) --------------------
    with open(test_file, 'r') as file_paths1:
        for path1 in file_paths1:
            path1 = path1.strip()  # drop the trailing newline
            print(path1)
            (sr, audio) = read(source_test + path1)
            vector = extract_features(audio, sr)
            scores = np.array(models.score(vector))
            log_likelihood[0] = scores.sum()
            print(log_likelihood[0])

    # Verdict: test log-likelihood must be close enough to the
    # enrollment-time baseline (both are typically negative).
    if ((logavg / log_likelihood[0]) * 100) > 85:
        print("legit user")
    else:
        print("Invalid user")