Example #1
def test_compare_state_probs_with_discrete(data_num, data_len, dthmm):
    """Test will run algorithms for counting state probability, determinically with the same initialization for both models"""
    t, _, e = dthmm.generate_data((data_num, data_len), times=True)

    ct = hmms.CtHMM.random(3, 3)
    dt = hmms.DtHMM(*ct.get_dthmm_params())

    assert compare_parameters_no_sort(dt, hmms.DtHMM(*ct.get_dthmm_params()))

    row = e[0]
    trow = t[0]

    # continuous-time model
    alpha = ct.forward(trow, row)
    beta = ct.backward(trow, row)
    gamma = ct.single_state_prob(alpha, beta)
    ksi = ct.double_state_prob(alpha, beta, trow, row)
    # discrete-time model
    d_alpha = dt.forward(row)
    d_beta = dt.backward(row)
    d_gamma = dt.single_state_prob(d_alpha, d_beta)
    d_ksi = dt.double_state_prob(d_alpha, d_beta, row)

    assert float_equal_mat(gamma, d_gamma)
    assert float_equal_mat(ksi[0], d_ksi)
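This test depends on pytest fixtures (data_num, data_len, dthmm) that the snippet does not define; the dthmm fixture itself appears in Example #12. A minimal sketch of how the missing fixtures might look (the sizes here are hypothetical):

import pytest
import hmms

@pytest.fixture
def data_num():
    return 10  # hypothetical number of generated sequences

@pytest.fixture
def data_len():
    return 20  # hypothetical length of each sequence

@pytest.fixture
def cthmm():
    return hmms.CtHMM.random(3, 3)  # random continuous-time model

@pytest.fixture
def dthmm(cthmm):
    # discrete model equivalent to the continuous one (see Example #12)
    return hmms.DtHMM(*cthmm.get_dthmm_params())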
Example #2
def small_hmm2():
    """Create small DtHMM  for testing of basic functionality"""
    A = numpy.array([[0.9, 0.1], [0.4, 0.6]])
    B = numpy.array([[0.75, 0.25], [0, 1.0]])
    pi = numpy.array([0.8, 0.2])
    hmm = hmms.DtHMM(A, B, pi)

    return hmm
Example #3
def small_hmm():
    """Create small DtHMM and basic emission sequence for testing of basic functionality"""
    A = numpy.array([[0.9, 0.1], [0.4, 0.6]])
    B = numpy.array([[0.9, 0.1], [0.2, 0.8]])
    pi = numpy.array([0.8, 0.2])
    hmm = hmms.DtHMM(A, B, pi)

    emissions = numpy.array([0, 1])
    return (hmm, emissions)
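A minimal usage sketch for this fixture (not part of the original tests), decoding with viterbi the same way Example #11 does; judging from that usage, viterbi returns the best path's probability and the path itself:

hmm, emissions = small_hmm()
log_prob, state_seq = hmm.viterbi(emissions)  # most likely hidden-state path
print(log_prob, state_seq)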
Example #4
def test_estimate(data_num, data_len, dthmm):
    """Test will run algorithms for counting state probability, determinically with the same initialization for both models"""
    t, s, e = dthmm.generate_data((data_num, data_len), times=True)

    ct = hmms.CtHMM.random(3, 3)
    dt = hmms.DtHMM(*ct.get_dthmm_params())

    cte = ct.estimate(s[0], t[0], e[0])
    dte = dt.estimate(s[0], e[0])

    assert cte == dte
Example #5
def train_model(data, epoch, eps):
    # Uniform initialization: 4 hidden states, 3 observable symbols.
    A = np.array([[1 / 4, 1 / 4, 1 / 4, 1 / 4], [1 / 4, 1 / 4, 1 / 4, 1 / 4],
                  [1 / 4, 1 / 4, 1 / 4, 1 / 4], [1 / 4, 1 / 4, 1 / 4, 1 / 4]])
    B = np.array([[1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3],
                  [1 / 3, 1 / 3, 1 / 3], [1 / 3, 1 / 3, 1 / 3]])
    Pi = np.array([1 / 4, 1 / 4, 1 / 4, 1 / 4])

    # Train on all but the last observation.
    index = len(data) - 1
    train = np.array([data[:index]])
    dhmm = hmms.DtHMM(A, B, Pi)
    dhmm.baum_welch(train, epoch)

    # Relative likelihood drop between the training window and the same
    # window shifted by one observation.
    base = np.array([data[:index]])
    test = np.array([data[1:1 + index]])
    base_lik = np.exp(dhmm.data_estimate(base))
    test_lik = np.exp(dhmm.data_estimate(test))
    result = (base_lik - test_lik) / base_lik
    return 0 if result < eps else 1
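A usage sketch, assuming data is a 1-D sequence of symbols in {0, 1, 2} (the emission matrix B above has three columns); the input below is made up for illustration:

import numpy as np
import hmms

data = np.random.randint(0, 3, size=100)  # hypothetical observation sequence
flag = train_model(data, epoch=10, eps=0.05)
print(flag)  # 1 if the relative likelihood drop reaches eps, else 0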
Example #6
def multi_train_ctdt(hidden_states, times, data, runs, iteration=10, **kwargs):
    """Run multiple Baum-Welch trainings, each with a different random initialization.

    kwargs:
        method: 'exp'  - [default] use exponential distribution for random initialization
                'unif' - use uniform distribution for random initialization
        ret:    'all'  - return all trained models, sorted by their probability estimation
                'best' - [default] return only the model with the best probability estimation
    """
    kwargs.setdefault('method', 'exp')
    kwargs.setdefault('ret', 'best')

    models_dt = []
    models_ct = []
    outputs = numpy.max(data) + 1

    for _ in range(runs):
        # Create two equivalent random models.
        model_ct = hmms.CtHMM.random(hidden_states, outputs, method=kwargs['method'])
        model_dt = hmms.DtHMM(*model_ct.get_dthmm_params())

        graph_ct = model_ct.baum_welch(times, data, iteration, est=True)
        graph_dt = model_dt.baum_welch(data, iteration, est=True)
        models_ct.append((model_ct, graph_ct))
        models_dt.append((model_dt, graph_dt))

    # Sort by the final likelihood estimate, best first.
    models_ct.sort(key=lambda x: x[1][-1], reverse=True)
    models_dt.sort(key=lambda x: x[1][-1], reverse=True)

    if kwargs['ret'] == 'all':
        return (models_dt, models_ct)

    return (models_dt[0], models_ct[0])
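Examples #9 and #10 below call this helper as hmms.multi_train_ctdt; a condensed sketch of the same pattern, with hypothetical sizes:

ct = hmms.CtHMM.random(3, 3)
dt = hmms.DtHMM(*ct.get_dthmm_params())
t, _, e = dt.generate_data((10, 20), times=True)  # 10 sequences of length 20

# keep only the best of 5 random restarts, 10 Baum-Welch iterations each
best_dt, best_ct = multi_train_ctdt(3, t, e, runs=5, iteration=10,
                                    method='unif', ret='best')
model, likelihood_curve = best_dt  # (model, per-iteration likelihood estimates)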
Example #7
def symmetric_hmm(theta, rho):
    """Two-state DtHMM with symmetric transition flip probability theta and emission noise rho."""
    transitions = np.array([[1 - theta, theta], [theta, 1 - theta]])
    emissions = np.array([[1 - rho, rho], [rho, 1 - rho]])
    pi = [0.5, 0.5]
    return hmms.DtHMM(transitions, emissions, pi)
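A usage sketch (not in the original source): sampling from the symmetric model with generate_data, the same call Example #12 uses:

import numpy as np
import hmms

hmm = symmetric_hmm(theta=0.1, rho=0.2)
states, observations = hmm.generate_data((5, 100))  # 5 sequences of length 100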
Example #8
def hmm_cycle_out():
    """Desired training output for sequence [[0,1,1]]"""
    A = numpy.array([[0, 1], [1, 0]])
    B = numpy.array([[1, 0], [0, 1]])
    pi = numpy.array([0.5, 0.5])
    return hmms.DtHMM(A, B, pi)
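A sketch of the training this fixture checks (the initial guess below is hypothetical): running baum_welch on the sequence [[0, 1, 1]], as Example #5 does, is expected to converge toward these cycle parameters, though EM convergence from an arbitrary start is not guaranteed:

A0 = numpy.array([[0.5, 0.5], [0.5, 0.5]])
B0 = numpy.array([[0.6, 0.4], [0.3, 0.7]])
pi0 = numpy.array([0.5, 0.5])
hmm = hmms.DtHMM(A0, B0, pi0)
hmm.baum_welch(numpy.array([[0, 1, 1]]), 50)  # 50 EM iterations
# compare hmm's trained parameters against hmm_cycle_out()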
Example #9
def cd_convergence_ex2():

    q = np.array([[[-0.375, 0.125, 0.25], [0.25, -0.5, 0.25],
                   [0.25, 0.125, -0.375]],
                  [[-0.275, 0.025, 0.25], [0.45, -0.7, 0.25],
                   [0.55, 0.225, -0.775]],
                  [[-0.5, 0.25, 0.25], [0.11, -0.44, 0.33],
                   [0.65, 0.42, -1.07]],
                  [[-0.3, 0.15, 0.15], [0.05, -0.5, 0.45],
                   [0.35, 0.025, -0.375]],
                  [[-0.525, 0.5, 0.025], [0.025, -0.725, 0.7],
                   [0.5, 0.015, -0.515]]])
    b = np.array([
        [[0.8, 0.05, 0.15], [0.05, 0.9, 0.05], [0.2, 0.05, 0.75]],
        [[0.7, 0.15, 0.15], [0.05, 0.8, 0.15], [0.0, 0.05, 0.95]],
        [[0.3, 0.35, 0.35], [0.25, 0.7, 0.05], [0.2, 0.15, 0.65]],
        [[0.5, 0.5, 0.0], [0.3, 0.6, 0.1], [0.2, 0.1, 0.7]],
        [[0.2, 0.05, 0.75], [0.8, 0.05, 0.15], [0.05, 0.9, 0.05]],
    ])
    pi = np.array([
        [0.6, 0, 0.4],
        [0.3, 0.3, 0.4],
        [0.6, 0.1, 0.3],
        [0.4, 0.2, 0.4],
        [0.8, 0.15, 0.05],
    ])

    models = 10
    offset = 1

    ##LEGEND

    fig = plt.figure()

    ax = fig.add_subplot(111)

    ax.set_xlabel('iterations')
    ax.set_ylabel('performance ratio')

    for mn in range(models):

        print("mn", mn)
        out_c = []
        out_d = []

        if mn < 5:
            Q = q[mn]
            B = b[mn]
            Pi = pi[mn]
            chmm = hmms.CtHMM(Q, B, Pi)
        else:
            chmm = hmms.CtHMM.random(3, 3, method='exp')

        dhmm = hmms.DtHMM(*chmm.get_dthmm_params())
        hmms.print_parameters(dhmm)

        t, _, e = dhmm.generate_data(
            (50, 50), times=True
        )  # the unused slot in the returned triple holds the state sequences, which we do not need for training

        creal = chmm.data_estimate(t, e)
        dreal = dhmm.data_estimate(e)
        print("Data estimation by continuous model:", creal)
        print("Data estimation by discrete model:  ", dreal)

        hidden_states = 3
        runs = 10  #20
        iterations = 150
        out_dt, out_ct = hmms.multi_train_ctdt(hidden_states,
                                               t,
                                               e,
                                               runs,
                                               iterations,
                                               ret='all',
                                               method='unif')

        for (m, a) in out_ct:
            out_c.append(a / dreal)

        for (m, a) in out_dt:
            out_d.append(a / dreal)

        ## DATA PLOT

        # label each curve family only once so the legend is not repeated
        if mn < 5:
            plt.plot(np.average(out_d, axis=0)[offset:],
                     label='DT - special' if mn == 0 else None,
                     color='darkorange')
            plt.plot(np.average(out_c, axis=0)[offset:],
                     label='CT - special' if mn == 0 else None,
                     color='blue')
        else:
            plt.plot(np.average(out_d, axis=0)[offset:],
                     label='DT - random' if mn == 5 else None,
                     color='red')
            plt.plot(np.average(out_c, axis=0)[offset:],
                     label='CT - random' if mn == 5 else None,
                     color='darkblue')

    print("out_c")
    print(out_c)
    print("out_d")
    print(out_d)
    # We can plot and compare both convergence rates. By the nature of the models, the continuous model will probably converge a bit more slowly, but will eventually reach a similar value.

    plt.legend()
    plt.show()
Example #10
def cd_convergence_ex1():

    out_c = []
    out_d = []

    models = 1
    offset = 1

    for m in range(models):

        Q = np.array([[-0.375, 0.125, 0.25], [0.25, -0.5, 0.25],
                      [0.25, 0.125, -0.375]])
        B = np.array([[0.8, 0.05, 0.15], [0.05, 0.9, 0.05], [0.2, 0.05, 0.75]])
        Pi = np.array([0.6, 0, 0.4])

        chmm = hmms.CtHMM(Q, B, Pi)

        dhmm = hmms.DtHMM(*chmm.get_dthmm_params())
        hmms.print_parameters(dhmm)

        t, _, e = dhmm.generate_data(
            (50, 50), times=True
        )  # the unused slot in the returned triple holds the state sequences, which we do not need for training

        creal = chmm.data_estimate(t, e)
        dreal = dhmm.data_estimate(e)
        print("Data estimation by continuous model:", creal)
        print("Data estimation by discrete model:  ", dreal)

        hidden_states = 3
        runs = 10
        iterations = 150
        out_dt, out_ct = hmms.multi_train_ctdt(hidden_states,
                                               t,
                                               e,
                                               runs,
                                               iterations,
                                               ret='all',
                                               method='unif')

        for (m, a) in out_ct:
            out_c.append(a / dreal)

        for (m, a) in out_dt:
            out_d.append(a / dreal)

    print("out_c")
    print(out_c)
    print("out_d")
    print(out_d)

    ##LEGEND

    fig = plt.figure()

    ax = fig.add_subplot(111)

    ax.set_xlabel('iterations')
    ax.set_ylabel('performance ratio')

    ## DATA PLOT

    for i in range(runs * models):
        # label only the first pair so the legend is not repeated per run
        plt.plot(out_d[i][offset:],
                 label='DT - single run' if i == 0 else None,
                 color='darkorange')
        plt.plot(out_c[i][offset:],
                 label='CT - single run' if i == 0 else None,
                 color='blue')

    plt.legend()  # draw the legend after the labelled plots exist

    ##LEGEND

    fig2 = plt.figure()

    ax2 = fig2.add_subplot(111)

    ax2.set_xlabel('iterations')
    ax2.set_ylabel('performance ratio')

    ## DATA PLOT

    plt.plot(np.average(out_d, axis=0)[offset:],
             label='DT - average',
             color='darkorange')
    plt.plot(np.average(out_c, axis=0)[offset:],
             label='CT - average',
             color='blue')

    plt.legend()

    plt.show()
Example #11
        all_data_outputs = torch.zeros((data.size - (size), 1),
                                       dtype=torch.long)

        latent = binary_dat.astype(np.int_)
        sanitized = data.astype(np.int_)

        print(f'SB acc:{np.sum(latent == sanitized)/latent.shape[0]}',
              file=sys.stderr)
        print(f'SB acc:{np.sum(latent == sanitized)/latent.shape[0]}',
              file=log_fd)

        p = np.array([[1 - q, q], [r, 1 - r]])
        pi = np.array((0.5, 0.5))
        emissions = np.array([[1 - rho_0, rho_0], [rho_1, 1 - rho_1]])

        hmm_model = hmms.DtHMM(p, emissions, pi)

        _, predictions = hmm_model.viterbi(sanitized)
        count = np.sum(predictions == latent)
        viterbi_accuracy = count / predictions.shape[0]
        print(f'Viterbi Acc: {viterbi_accuracy}', file=sys.stderr)
        print(f'Viterbi Acc: {viterbi_accuracy}', file=log_fd)

        for inner_idx in range(data.size - size):
            all_data_inputs[inner_idx] = torch.tensor(
                sanitized[inner_idx:inner_idx + size])
            all_data_outputs[inner_idx] = torch.tensor(latent[inner_idx +
                                                              size // 2])

        all_data = list(zip(all_data_inputs, all_data_outputs))
        num_folds = 5
Example #12
def dthmm(cthmm):
    """The discrete model, created so it behaves identical to the given continuous."""
    return hmms.DtHMM(*cthmm.get_dthmm_params())
# epsilonA, epsilonB = getBestEpsilons(performanceErrors)

# now we compute the final model on the entire train set,
# with the values obtained from the cross-validation phase

epsilonA, epsilonB = 0.003, 0.000002
print("optimal epsilonA: " + str(epsilonA))
print("optimal epsilonB: " + str(epsilonB))
print(performanceErrors)
A = np.zeros((numberOfStates, numberOfStates))
pi = np.zeros(numberOfStates)
B = np.zeros((numberOfStates, numberOfWords))
numberOfWordsObservedPerState = dict()  # keyed by state
numberOfTimesStateIsFollowedByState = dict()  # keyed by state
numberOfVisitsPerState = dict()  # keyed by state
numberOfWordsObservedPerState[initialState()] = dict()
numberOfTimesStateIsFollowedByState[initialState()] = dict()
computeCounters(trainSet)
computeParameters(epsilonA, epsilonB)

# Save the POS-tagger parameters to file
dhmm = hmms.DtHMM(A, B, pi)
dhmm.save_params('./../results/4/hmmParameters')

# compute the tagging error rate on the test set
totalTagOccurrences = dict()
matchesMatrix = dict()
confusionMatrix = dict()
taggingErrorRate = evaluate(trainSet, dhmm)
print(taggingErrorRate)
    with open(output, "a+") as f:
        f.write(f"eps, viterbi_accuracy, lstm_accuracy\n")

    q, r = 0.08937981353871098, 0.10924092409240924
    transitions = np.array([[1 - q, q], [r, 1 - r]])
    pi = [0.5, 0.5]
    for eps in lst_eps:
        # eps = 2
        print('eps:', eps, file=sys.stderr)
        print('eps:', eps, file=log_fd)
        rho_0, rho_1 = min_exp_noise(q, r, eps)
        emissions = np.array([[1 - rho_0, rho_0], [rho_1, 1 - rho_1]])
        print('rho_0, rho_1:', rho_0, rho_1, file=sys.stderr)
        print('rho_0, rho_1:', rho_0, rho_1, file=log_fd)

        hmm = hmms.DtHMM(transitions, emissions, pi)
        latent, sanitized = hmm.generate_data((num_hidden_states, seq_len))
        latent = latent[0]
        sanitized = sanitized[0]
        size = 100

        inputs = torch.zeros((num_hidden_states * (seq_len - (size)), size),
                             dtype=torch.long)
        outputs = torch.zeros((num_hidden_states * (seq_len - (size)), 1),
                              dtype=torch.long)

        # latent = latents[0]
        # sanitized = inference_attack.emissions(latent, hmm.b)
        print(f'SB acc:{np.sum(latent == sanitized)/latent.shape[0]}',
              file=sys.stderr)
        print(f'SB acc:{np.sum(latent == sanitized)/latent.shape[0]}',