def forcedAlignment(lmfcc, phoneHMMs, phoneTrans):
    """Align a phonetic transcription with the feature frames at the state level.

    Args:
       lmfcc: NxD array of MFCC feature vectors (N vectors of dimension D)
              computed the same way as for the training of phoneHMMs
       phoneHMMs: set of phonetic Gaussian HMM models
       phoneTrans: list of phonetic symbols to be aligned including initial and
                   final silence

    Returns:
       list of strings in the form phoneme_index giving, for each time step,
       the state from phoneHMMs on the Viterbi path.
    """
    # phone transcription => state-level transcription names
    stateTrans = []
    for phone in phoneTrans:
        for idx in range(phoneHMMs[phone]['means'].shape[0]):
            stateTrans.append(phone + '_' + str(idx))

    # combined HMM covering the whole utterance
    utteranceHMM = concatHMMs(phoneHMMs, phoneTrans)

    # frame-wise log emission likelihoods, then Viterbi decoding
    loglik = log_multivariate_normal_density_diag(
        lmfcc, utteranceHMM['means'], utteranceHMM['covars'])
    path = viterbi(loglik,
                   np.log(utteranceHMM['startprob']),
                   np.log(utteranceHMM['transmat']))[1]

    # frame-by-frame state transcription
    return [stateTrans[state] for state in path]
def forward_algorithm():
    """Verify the forward (alpha) pass and score every utterance with it.

    Relies on module-level globals: prondict, example, data, phoneHMMsAll,
    phoneHMMsOne and helpers get_isolated, concatHMMs, forward,
    log_multivariate_normal_density_diag, plot_p_color_mesh, logsumexp.
    Prints the true labels and the predictions of both model sets.
    """
    wordHMMs = {}
    isolated = get_isolated(prondict)
    # reference alpha matrix shipped with the example data
    plot_p_color_mesh(example['logalpha'], 'example alpha matrix')

    # verify implementation
    wordHMMs['o'] = concatHMMs(phoneHMMsAll, isolated['o'])
    log_st_prob = np.log(wordHMMs['o']['startprob'])
    log_transmat = np.log(wordHMMs['o']['transmat'])
    alpha_matrix = forward(example['obsloglik'], log_st_prob, log_transmat)
    plot_p_color_mesh(alpha_matrix, "hmms all output example alpha matrix")

    # 44 data labels
    keys_list = [x for x in isolated.keys()]
    # forward log-likelihood of every utterance under every word model
    scores_models_all = np.zeros((len(data), len(isolated)))
    scores_models_onespkr = np.zeros_like(scores_models_all)

    for j in range(len(keys_list)):
        key = keys_list[j]
        # word model built from the all-speakers phone HMMs
        hmms = concatHMMs(phoneHMMsAll, isolated[key])
        log_st_prob = np.log(hmms['startprob'])
        log_transmat = np.log(hmms['transmat'])
        for i in range(len(data)):
            lpr_test = log_multivariate_normal_density_diag(
                data[i]['lmfcc'], hmms['means'], hmms['covars'])
            alpha = forward(lpr_test, log_st_prob, log_transmat)
            # sequence log likelihood = logsumexp over the last alpha row
            scores_models_all[i, j] = logsumexp(alpha[len(alpha) - 1])

        # same scoring with the single-speaker phone HMMs
        hmms = concatHMMs(phoneHMMsOne, isolated[key])
        log_st_prob = np.log(hmms['startprob'])
        log_transmat = np.log(hmms['transmat'])
        for i in range(len(data)):
            lpr_test = log_multivariate_normal_density_diag(
                data[i]['lmfcc'], hmms['means'], hmms['covars'])
            alpha = forward(lpr_test, log_st_prob, log_transmat)
            scores_models_onespkr[i, j] = logsumexp(alpha[len(alpha) - 1])

    # predicted word = model with the highest log likelihood
    predict_all = np.argmax(scores_models_all, axis=1)
    predict_one = np.argmax(scores_models_onespkr, axis=1)

    label_all = [keys_list[x] for x in predict_all]
    label_one = [keys_list[x] for x in predict_one]

    true_label = [data[x]['digit'] for x in range(len(data))]
    print(true_label)
    print(label_all)
    print(label_one)
def _run_em(utterance, model, max_iteration, threshold):
    """Iterate EM on one utterance until the log-likelihood gain < threshold.

    Args:
        utterance: data record with an 'lmfcc' feature matrix
        model: word HMM dict (mutated copies are produced by EM, the
               caller's dict is rebound, not modified in place)
        max_iteration: hard cap on the number of EM iterations
        threshold: stop when |new - old| log likelihood falls below this

    Returns:
        list of log likelihoods, starting with the initial model's score.
    """
    lpr, old_likelihood = loglikelihood(utterance, model)
    likelihood_list = [old_likelihood]
    for _ in range(max_iteration):
        model = EM(utterance['lmfcc'], lpr, model)
        lpr, new_likelihood = loglikelihood(utterance, model)
        likelihood_list.append(new_likelihood)
        if np.abs(new_likelihood - old_likelihood) < threshold:
            break
        old_likelihood = new_likelihood
    return likelihood_list


def _plot_em_curve(likelihood_list, title):
    """Plot one EM learning curve (log likelihood per iteration)."""
    plt.figure(figsize=(12, 6))
    ax = plt.subplot(121)
    ax.set_title(title)
    plt.plot(likelihood_list, color='red')
    plt.show()


def EM_learning():
    """Run EM re-estimation of two word models on utterance data[10].

    Trains the '4' model (matching digit) and the 'z' model (mismatched)
    on the same utterance and plots both learning curves.  Relies on
    module-level globals: prondict, data, phoneHMMsAll and helpers
    get_isolated, concatHMMs, loglikelihood, EM.
    """
    isolated = get_isolated(prondict)
    max_iteration = 20
    threshold = 1.0

    # first case: word model for digit '4'
    model = concatHMMs(phoneHMMsAll, isolated['4'])
    curve = _run_em(data[10], model, max_iteration, threshold)
    _plot_em_curve(curve, 'EM learning, data[10], wordHMM[4]')

    # second case: word model for digit 'z', same utterance
    model = concatHMMs(phoneHMMsAll, isolated['z'])
    curve = _run_em(data[10], model, max_iteration, threshold)
    _plot_em_curve(curve, 'EM learning, data[10], wordHMM[z]')
def backward_algorithm():
    """Compute and visualise the backward (beta) probabilities for word 'o'."""
    # reference beta matrix shipped with the example data
    plot_p_color_mesh(example['logbeta'], 'example beta matrix')

    # verify implementation against the example
    isolated = get_isolated(prondict)
    wordHMMs = {'o': concatHMMs(phoneHMMsAll, isolated['o'])}
    beta_matrix = backward(example['obsloglik'],
                           np.log(wordHMMs['o']['startprob']),
                           np.log(wordHMMs['o']['transmat']))
    plot_p_color_mesh(beta_matrix, "hmms all output example beta matrix")
Example #5
0
    def test_concatHMMs(self):
        """
        test_concatHMMs: Required verification in section 5.1
        """
        # build the concatenated word HMM for digit 'o'
        word_model = concatHMMs(self.phoneHMMs, isolated['o'])

        # log likelihood of the example observations under each state's
        # diagonal-covariance Gaussian
        obsloglik = log_multivariate_normal_density_diag(
            self.example['lmfcc'],
            word_model['means'],
            word_model['covars'])

        # must match the reference result shipped with the example
        assert_allclose(obsloglik, self.example['obsloglik'])
def gaussian_emission_prob():
    """Plot Gaussian emission log likelihoods of word 'o' for two utterances."""
    isolated = get_isolated(prondict)
    word_o = concatHMMs(phoneHMMsAll, isolated['o'])

    # frame/state log likelihoods for the example utterance
    lpr_example = log_multivariate_normal_density_diag(
        example['lmfcc'], word_o['means'], word_o['covars'])

    # same computation for the first utterance of the data set
    lpr_test = log_multivariate_normal_density_diag(
        data[0]['lmfcc'], word_o['means'], word_o['covars'])

    plot_p_color_mesh(lpr_example, 'example log likelihood')
    plot_p_color_mesh(lpr_test, 'test log likelihood')
Example #7
0
def forcedAlignment(lmfcc, phoneHMMs, phoneTrans):
    """Align a phonetic transcription at the state level.

    Args:
       lmfcc: NxD array of MFCC feature vectors (N vectors of dimension D)
              computed the same way as for the training of phoneHMMs
       phoneHMMs: set of phonetic Gaussian HMM models
       phoneTrans: list of phonetic symbols to be aligned including initial and
                   final silence

    Returns:
       list of strings in the form phoneme_index giving, for each time step,
       the state from phoneHMMs on the Viterbi path.
    """
    # number of states per phone model
    nstates = {ph: phoneHMMs[ph]['means'].shape[0] for ph in phoneHMMs}
    # expand the phone transcription into per-state names
    stateTrans = ["%s_%i" % (ph, i)
                  for ph in phoneTrans
                  for i in range(nstates[ph])]

    # utterance-level HMM built from the transcription alone
    utteranceHMM = concatHMMs(phoneHMMs, phoneTrans)

    # emission log likelihoods for every frame/state pair
    log_emlik = log_mnd_diag(lmfcc,
                             utteranceHMM['means'],
                             utteranceHMM['covars'])

    # ignore the terminal state when taking \pi and A into log space
    log_pi = np.log(utteranceHMM['startprob'][:-1])
    log_trans = np.log(utteranceHMM['transmat'][:-1, :-1])
    _, path = viterbi(log_emlik, log_pi, log_trans)

    # map the decoded state indices back to readable names
    return [stateTrans[i] for i in path]
def forcedAlignment(lmfcc, phoneHMMs, phoneTrans):
    """Align a phonetic transcription at the state level.

    Args:
       lmfcc: NxD array of MFCC feature vectors (N vectors of dimension D)
             computed the same way as for the training of phoneHMMs
       phoneHMMs: set of phonetic Gaussian HMM models
       phoneTrans: list of phonetic symbols to be aligned including initial and
                   final silence

    Returns:
       list of strings in the form phoneme_index specifying, for each time step
       the state from phoneHMMs corresponding to the viterbi path.
    """
    # BUG FIX: the original returned the raw (loglik, path) tuple from
    # viterbi instead of the documented list of phoneme_index strings.
    # Build the state-level transcription so the path can be mapped back.
    stateTrans = [p + '_' + str(i)
                  for p in phoneTrans
                  for i in range(phoneHMMs[p]['means'].shape[0])]

    # combined HMM for the utterance
    utteranceHMM = concatHMMs(phoneHMMs, phoneTrans)

    # frame-wise log emission likelihoods
    emmision = log_multivariate_normal_density_diag(lmfcc,
                                                    utteranceHMM['means'],
                                                    utteranceHMM['covars'])
    # Viterbi decoding; keep only the best state path
    _, path = viterbi(emmision, np.log(utteranceHMM['startprob']),
                      np.log(utteranceHMM['transmat']))

    # frame-by-frame state transcription
    return [stateTrans[s] for s in path]
def state_posterior_test():
    """Verify the HMM state posteriors (section 6.1) and plot them."""
    log_gamma = statePosteriors(example['logalpha'], example['logbeta'])

    plot_p_color_mesh(log_gamma, 'log state hmm posterior')

    # back to linear domain for the sanity-check sums
    gamma = np.exp(log_gamma)
    print('verification')
    print(np.sum(gamma, axis=1))
    print('6.1 summing over time steps')
    print(np.sum(gamma, axis=0))
    print('6.1 summing over time steps and states')
    print(np.sum(gamma))

    # emission likelihoods of the example under the word-'o' model
    isolated = get_isolated(prondict)
    wordHMMs = {'o': concatHMMs(phoneHMMsAll, isolated['o'])}
    lpr = log_multivariate_normal_density_diag(example['lmfcc'],
                                               wordHMMs['o']['means'],
                                               wordHMMs['o']['covars'])
    plot_p_color_mesh(lpr, 'log state gmm posterior')
Example #10
0
            'label': label,
            'matched_model': matched_model,
            'score': max_loglik
        })
    return ret


if __name__ == "__main__":
    # load data
    data = np.load('data/lab2_data.npz')['data']
    example = np.load('data/lab2_example.npz')['example'].item()
    phoneHMMs = np.load('data/lab2_models_onespkr.npz')['phoneHMMs'].item()

    # Build hmm
    wordHMMs = {}
    wordHMMs['o'] = concatHMMs(phoneHMMs, isolated['o'])

    # slice off the last (terminal) state before moving to log space
    trans_mat = wordHMMs['o']['transmat'][:-1, :-1]
    pi_vec = wordHMMs['o']['startprob'][:-1]
    # =====================================================
    log_alpha = forward(example['obsloglik'], np.log(pi_vec),
                        np.log(trans_mat))

    # calculate the sequence log likelihood to this hmm model
    # i.e. log[P(X_{1:T} | HMM model)]
    log_seq_likelihood = logsumexp(log_alpha[-1])
    print("The log likelihood of the observation seq to the model:",
          log_seq_likelihood)
    # ==================================================================================
    # check if closed to the example
    # is_alpha_close = np.allclose(log_alpha, example['logalpha'])
import numpy as np
from lab2_proto import concatHMMs

# utterance data; allow_pickle is required for object arrays in the .npz
data = np.load('lab2_data.npz', allow_pickle=True)['data']

# per-phone HMMs trained on a single speaker
phoneHMMs = np.load('lab2_models_onespkr.npz', allow_pickle=True)['phoneHMMs'].item()
# phoneHMMs = np.load('lab2_models_all.npz', allow_pickle=True)['phoneHMMs'].item()



# word-level HMM for 'o': silence + 'ow' + silence phone models concatenated
wordHMMs = {}
wordHMMs['o'] = concatHMMs(phoneHMMs, ['sil', 'ow', 'sil'])

Example #12
0
from prondict import isolated
from lab2_proto import concatHMMs
from lab2_proto import backward
from lab2_proto import forward
from lab2_proto import statePosteriors
from lab2_tools import logsumexp

if __name__ == "__main__":
    # load data
    data = np.load('data/lab2_data.npz')['data']
    example = np.load('data/lab2_example.npz')['example'].item()
    phoneHMMs = np.load('data/lab2_models_onespkr.npz')['phoneHMMs'].item()

    # Build hmm
    wordHMMs = {}
    wordHMMs['o'] = concatHMMs(phoneHMMs, isolated['o'])

    # slice off the last (terminal) state before moving to log space
    trans_mat = wordHMMs['o']['transmat'][:-1, :-1]
    pi_vec = wordHMMs['o']['startprob'][:-1]
    log_startprob = np.log(pi_vec)
    log_transmat = np.log(trans_mat)
    log_emlik = example['obsloglik']
    # =====================================================
    log_beta = backward(log_emlik, log_startprob, log_transmat)
    log_alpha = forward(log_emlik, log_startprob, log_transmat)

    # calculate the log gamma (state posteriors from alpha and beta)
    log_gamma = statePosteriors(log_alpha, log_beta)

    # print(np.allclose(example['loggamma'], log_gamma))
def verify_concat_hmms():
    """Visual check of concatHMMs: plot the transition matrix of word 'o'."""
    isolated = get_isolated(prondict)
    word_o = concatHMMs(phoneHMMsAll, isolated['o'])
    plot_p_color_mesh(word_o['transmat'], 'word o transmat')
def viterbi_algorithm():
    """Check the Viterbi decoder against the example and score all utterances.

    Relies on module-level globals: prondict, example, data, phoneHMMsAll,
    phoneHMMsOne and helpers get_isolated, concatHMMs, viterbi, forward,
    log_multivariate_normal_density_diag, plus matplotlib for the plots.
    Prints the true labels and the predictions of both model sets.
    """
    wordHMMs = {}
    isolated = get_isolated(prondict)

    # verify implementation
    wordHMMs['o'] = concatHMMs(phoneHMMsAll, isolated['o'])
    log_st_prob = np.log(wordHMMs['o']['startprob'])
    log_transmat = np.log(wordHMMs['o']['transmat'])
    vloglik, bestPath = viterbi(example['obsloglik'], log_st_prob,
                                log_transmat)
    # alpha matrix is only used as the background of the path plot
    alpha_matrix = forward(example['obsloglik'], log_st_prob, log_transmat)
    print('vloglik from viterbi():', vloglik)
    print('vloglik from example:', example['vloglik'])

    # plot
    fig = plt.figure(figsize=(12, 6))
    ax = plt.subplot(121)
    ax.set_title('viterbi path from Viterbi()')
    plt.pcolormesh(alpha_matrix)
    plt.plot(bestPath, np.arange(len(bestPath)), color='red')
    plt.colorbar()
    plt.show()

    # same plot using the reference path from the example data
    fig = plt.figure(figsize=(12, 6))
    ax = plt.subplot(121)
    ax.set_title('viterbi path from example')
    plt.pcolormesh(example['logalpha'])
    plt.plot(example['vpath'], np.arange(len(bestPath)), color='red')
    plt.colorbar()
    plt.show()

    # 44 data labels
    keys_list = [x for x in isolated.keys()]
    # Viterbi score of every utterance under every word model
    scores_models_all = np.zeros((len(data), len(isolated)))
    scores_models_onespkr = np.zeros_like(scores_models_all)

    for j in range(len(keys_list)):
        key = keys_list[j]
        # word model built from the all-speakers phone HMMs
        hmms = concatHMMs(phoneHMMsAll, isolated[key])
        log_st_prob = np.log(hmms['startprob'])
        log_transmat = np.log(hmms['transmat'])
        for i in range(len(data)):
            lpr_test = log_multivariate_normal_density_diag(
                data[i]['lmfcc'], hmms['means'], hmms['covars'])
            loglik, path = viterbi(lpr_test, log_st_prob, log_transmat)
            scores_models_all[i, j] = loglik

        # same scoring with the single-speaker phone HMMs
        hmms = concatHMMs(phoneHMMsOne, isolated[key])
        log_st_prob = np.log(hmms['startprob'])
        log_transmat = np.log(hmms['transmat'])
        for i in range(len(data)):
            lpr_test = log_multivariate_normal_density_diag(
                data[i]['lmfcc'], hmms['means'], hmms['covars'])
            loglik, path = viterbi(lpr_test, log_st_prob, log_transmat)
            scores_models_onespkr[i, j] = loglik

    # predicted word = model with the highest Viterbi log likelihood
    predict_all = np.argmax(scores_models_all, axis=1)
    predict_one = np.argmax(scores_models_onespkr, axis=1)

    label_all = [keys_list[x] for x in predict_all]
    label_one = [keys_list[x] for x in predict_one]

    true_label = [data[x]['digit'] for x in range(len(data))]
    print(true_label)
    print(label_all)
    print(label_one)
Example #15
0
        if (loglik - loglik_old) < threshold or np.isnan(loglik):
            print("Terminating the EM")
            break
        else:
            loglik_old = loglik


if __name__ == "__main__":
    # load data
    data = np.load('data/lab2_data.npz')['data']
    phoneHMMs = np.load('data/lab2_models_onespkr.npz')['phoneHMMs'].item()

    # Build hmm
    wordHMMs = {}
    for d in isolated.keys():
        wordHMMs[d] = concatHMMs(phoneHMMs, isolated[d])

    # get the observation sequence
    feature = data[10]['lmfcc']

    # First part
    # calculate the emissions
    digit = '4'
    means = wordHMMs[digit]['means']
    covars = wordHMMs[digit]['covars']
    obsloglik = log_multivariate_normal_density_diag(feature, means, covars)

    # calculate the log likelihood
    trans_mat = wordHMMs[digit]['transmat'][:-1, :-1]
    pi_vec = wordHMMs[digit]['startprob'][:-1]
    # log space
Example #16
0
    # feature extraction
    filename = 'data/tidigits/disc_4.1.1/tidigits/train/man/nw/z43a.wav'
    samples, samplingrate = loadAudio(filename)
    lmfcc = mfcc(samples)

    # transcription
    wordTrans = list(path2info(filename)
                     [2])  # word transcription (contained in the filename)
    phoneTrans = words2phones(
        wordTrans, prondict)  # word transcription => phone transcription
    stateTrans = [
        p + '_' + str(i) for p in phoneTrans for i in range(nstates[p])
    ]  # phone transcription => state transcription

    # combined HMM for utterance
    utteranceHMM = concatHMMs(phoneHMMs, phoneTrans)

    # Viterbi decoder
    obsloglik = log_multivariate_normal_density_diag(lmfcc,
                                                     utteranceHMM['means'],
                                                     utteranceHMM['covars'])
    viterbiLoglik, viterbiPath = viterbi(obsloglik,
                                         np.log(utteranceHMM['startprob']),
                                         np.log(utteranceHMM['transmat']))

    # time alignment (frame-by-frame state transcription)
    viterbiStateTrans = [stateTrans[s] for s in viterbiPath]

    # save in standard format (to use it, put it in the same directory of .wav and open .wav with wavesurfer)
    frames2trans(viterbiStateTrans, outfilename='data/transcriptions/z43a.lab')
Example #17
0
            'label': label,
            'matched_model': matched_model,
            'score': score
        })
    return ret


if __name__ == "__main__":
    # load data
    data = np.load('data/lab2_data.npz')['data']
    example = np.load('data/lab2_example.npz')['example'].item()
    phoneHMMs = np.load('data/lab2_models_onespkr.npz')['phoneHMMs'].item()

    # Build hmm
    wordHMMs = {}
    wordHMMs['o'] = concatHMMs(phoneHMMs, isolated['o'])

    trans_mat = wordHMMs['o']['transmat'][:-1, :-1]
    pi_vec = wordHMMs['o']['startprob'][:-1]
    # =====================================================
    best_seq_loglik, best_path = viterbi(example['obsloglik'], np.log(pi_vec),
                                         np.log(trans_mat))
    assert np.allclose(best_seq_loglik, example['vloglik'])
    assert np.array_equal(best_path, example['vpath'])
    # ========================================================================
    onespkr_wordHMMs = {}
    for k in isolated.keys():
        onespkr_wordHMMs[k] = concatHMMs(phoneHMMs, isolated[k])

    phoneHMMs_all = np.load('data/lab2_models_all.npz')['phoneHMMs'].item()
    for d in isolated.keys():