Example no. 1
if __name__ == '__main__':
    N = 10000
    inputs_1, outputs = get_data(N, input_dim)
    m = build_model()
    m.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
    print(m.summary())
    m.fit([inputs_1], outputs, epochs=20, batch_size=64, validation_split=0.5)

    testing_inputs_1, testing_outputs = get_data(1, input_dim)
    # Because layer_name='attention_vec' is passed, get_activations returns
    # only that layer's output, so index [0] is the attention vector.
    attention_vector = get_activations(
        m, testing_inputs_1, print_shape_only=True,
        layer_name='attention_vec')[0].flatten()
    print('attention =', attention_vector)

    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector, columns=['attention (%)']).plot(
        kind='bar',
        title='Attention Mechanism as a function of input dimensions.')
    plt.show()
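Note: get_data and build_model are not shown on this page. A minimal sketch of what they could look like, assuming a dense attention layer named 'attention_vec' applied directly to the inputs; the shapes, layer sizes and the attention_column default are illustrative assumptions, not the original project's code:

import numpy as np
from tensorflow.keras.layers import Dense, Input, Multiply
from tensorflow.keras.models import Model

input_dim = 32  # assumed; the examples only reference input_dim


def get_data(n, input_dim, attention_column=1):
    # Random inputs in which a single column is a copy of the label,
    # so a trained attention layer should concentrate its weight there.
    x = np.random.standard_normal(size=(n, input_dim))
    y = np.random.randint(low=0, high=2, size=(n, 1))
    x[:, attention_column] = y[:, 0]
    return x, y


def build_model():
    inputs = Input(shape=(input_dim,))
    # A softmax over the input dimensions plays the role of attention weights.
    attention_probs = Dense(input_dim, activation='softmax',
                            name='attention_vec')(inputs)
    attention_mul = Multiply()([inputs, attention_probs])
    attention_mul = Dense(64)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    return Model(inputs=[inputs], outputs=output)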

Example no. 2

if __name__ == '__main__':
    N = 10000
    inputs_1, outputs = get_data(N, input_dim)

    m = build_model()
    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=20, batch_size=64, validation_split=0.5)

    testing_inputs_1, testing_outputs = get_data(1, input_dim)

    # Because layer_name='attention_vec' is passed, get_activations returns
    # only that layer's output, so index [0] is the attention vector.
    attention_vector = get_activations(m, testing_inputs_1,
                                       print_shape_only=True,
                                       layer_name='attention_vec')[0].flatten()
    print('attention =', attention_vector)

    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector, columns=['attention (%)']).plot(kind='bar',
                                                                   title='Attention Mechanism as '
                                                                         'a function of input'
                                                                         ' dimensions.')
    plt.show()
Example no. 3

    inputs_1, outputs = get_data(N, input_dim)

    m = build_model()
    m.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=20, batch_size=64, validation_split=0.5)

    testing_inputs_1, testing_outputs = get_data(1, input_dim)

    # Attention vector corresponds to the second matrix.
    # The first one is the Inputs output.
    attention_vector = get_activations(m,
                                       testing_inputs_1,
                                       print_shape_only=True)[1].flatten()
    print('attention =', attention_vector)

    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector, columns=['attention (%)']).plot(
        kind='bar',
        title='Attention Mechanism as a function of input dimensions.')
    plt.show()
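Every example on this page reads intermediate outputs through get_activations, whose definition is not included here. A minimal stand-in, assuming a tf.keras functional model; it is not the original helper, but it returns the same kind of list (all layer outputs, or only the layers whose name matches layer_name):

from tensorflow.keras.models import Model


def get_activations(model, model_inputs, print_shape_only=False,
                    layer_name=None):
    # Collect the output of every layer (or only of the layers matching
    # layer_name) for the given inputs, printing shape or values as we go.
    layers = [layer for layer in model.layers
              if layer_name is None or layer.name == layer_name]
    activations = []
    for layer in layers:
        intermediate = Model(inputs=model.input, outputs=layer.output)
        act = intermediate.predict(model_inputs)
        print(act.shape if print_shape_only else act)
        activations.append(act)
    return activations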
Example no. 4
    if APPLY_ATTENTION_BEFORE_LSTM:
        m = model_attention_applied_before_lstm()
    else:
        m = model_attention_applied_after_lstm()

    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=1, batch_size=64, validation_split=0.1)

    attention_vectors = []
    for i in range(300):
        testing_inputs_1, testing_outputs = get_data_recurrent(1, TIME_STEPS, INPUT_DIM)
        attention_vector = np.mean(get_activations(m,
                                                   testing_inputs_1,
                                                   print_shape_only=True,
                                                   layer_name='attention_vec')[0], axis=2).squeeze()
        print('attention =', attention_vector)
        assert abs(np.sum(attention_vector) - 1.0) < 1e-5  # weights should sum to 1
        attention_vectors.append(attention_vector)

    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)
    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector_final, columns=['attention (%)']).plot(kind='bar',
                                                                         title='Attention Mechanism as '
                                                                               'a function of input'
                                                                               ' dimensions.')
    plt.show()
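get_data_recurrent, model_attention_applied_before_lstm and model_attention_applied_after_lstm are also assumed by this example. A rough sketch of the data generator and of the after-LSTM variant; TIME_STEPS, INPUT_DIM, the LSTM width and the attention_column default are arbitrary assumptions, and only the layer name 'attention_vec' is taken from the calls above:

import numpy as np
from tensorflow.keras.layers import (Dense, Flatten, Input, LSTM, Multiply,
                                     Permute)
from tensorflow.keras.models import Model

TIME_STEPS = 20  # assumed values
INPUT_DIM = 2


def get_data_recurrent(n, time_steps, input_dim, attention_column=10):
    # Random sequences in which a single time step is a copy of the label.
    x = np.random.standard_normal(size=(n, time_steps, input_dim))
    y = np.random.randint(low=0, high=2, size=(n, 1))
    x[:, attention_column, :] = np.tile(y, (1, input_dim))
    return x, y


def attention_3d_block(inputs):
    # inputs: (batch, time_steps, features). One softmax weight per time
    # step is learned and used to rescale the sequence.
    a = Permute((2, 1))(inputs)                     # (batch, features, time_steps)
    a = Dense(TIME_STEPS, activation='softmax')(a)  # softmax over time
    a_probs = Permute((2, 1), name='attention_vec')(a)
    return Multiply()([inputs, a_probs])


def model_attention_applied_after_lstm():
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM))
    lstm_out = LSTM(32, return_sequences=True)(inputs)
    attention_mul = attention_3d_block(lstm_out)
    attention_mul = Flatten()(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    return Model(inputs=[inputs], outputs=output)

With this layout the 'attention_vec' activation has shape (batch, TIME_STEPS, 32), which is why the loop above averages over axis 2 and expects the per-timestep weights to sum to roughly 1.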
Example no. 5

    if APPLY_ATTENTION_BEFORE_LSTM:
        m = model_attention_applied_before_lstm()
    else:
        m = model_attention_applied_after_lstm()

    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=1, batch_size=64, validation_split=0.1)

    attention_vectors = []
    for i in range(300):
        testing_inputs_1, testing_outputs = get_data_recurrent(1, TIME_STEPS, INPUT_DIM)
        attention_vector = np.mean(get_activations(m,
                                                   testing_inputs_1,
                                                   print_shape_only=True,
                                                   layer_name='attention_vec')[0], axis=2).squeeze()
        print('attention =', attention_vector)
        assert abs(np.sum(attention_vector) - 1.0) < 1e-5  # weights should sum to 1
        attention_vectors.append(attention_vector)

    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)
    # plot part.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector_final, columns=['attention (%)']).plot(kind='bar',
                                                                         title='Attention Mechanism as '
                                                                               'a function of input'
                                                                               ' dimensions.')
    plt.show()
Example no. 6

                                    labels=[0, 1, 2, 3, 4, 5, 6])
        print('Epoch_{}:F1_{}   ER_{}'.format(i, score_list['f1_overall_1sec'],
                                              score_list['er_overall_1sec']))
        print(conf_mat)
        conf_mat = conf_mat / (utils.eps +
                               np.sum(conf_mat, 1)[:, None].astype('float'))
        print(conf_mat)
        model_path = '/data/users/21799506/Data/PRL2018/Evaluation/models_Temp/model_' + str(
            i) + '.h5'
        model.save(model_path)
        plot_functions(nb_epoch, tr_loss, val_loss, f1_overall_1sec_list_eval,
                       er_overall_1sec_list_eval, f1_overall_1sec_list,
                       er_overall_1sec_list)

        attention_vector = get_activations(model,
                                           X_test,
                                           print_shape_only=True,
                                           layer_name='attention_vec')[0]
        conv_vector = get_activations(model,
                                      X_test,
                                      print_shape_only=True,
                                      layer_name='conv_vec')[0]
        savevector = attention_vector.reshape(-1, 256, 256)
        name = '/data/users/21799506/Data/PRL2018/Evaluation/AttentionVector_Temp/attention_vector' + str(
            i)
        sio.savemat(name, {'array_atten': savevector})
        name = '/data/users/21799506/Data/PRL2018/Evaluation/AttentionVector_Temp/conv_vector' + str(
            i)
        sio.savemat(name, {'array_conv': conv_vector})

    lossname = '/data/users/21799506/Data/PRL2018/Evaluation/models_Temp/performance'
    sio.savemat(
Example no. 7

              metrics=['accuracy'])
    print(m.summary())
    plot_model(m, to_file='model_lstm.png', show_shapes=True)

    m.fit([inputs_1], outputs, epochs=1, batch_size=64, validation_split=0.1)
    attention_vectors = []

    for i in range(50):

        # Generate one data sample.
        x, y, attention_columns = get_data_recurrent(1, TIME_STEPS, INPUT_DIM,
                                                     5)

        # Pull out the activations of the layer named 'attention_vec'.
        test = get_activations(m,
                               x,
                               print_shape_only=True,
                               layer_name='attention_vec')

        predicted = m.predict(x)

        # print(len(test[0][0]))
        # print(len(test[0][0][0]))
        # print(np.mean(test[0], axis=2))

        print("input", x.squeeze())
        x_labels = [str(o) for o in x.squeeze()]
        print(x_labels)
        print("testing_outputs", y)
        print("attention_columns", attention_columns)

        # attention_vector is the mean over the third dimension, e.g. over
        # the 32 in a shape of (1, 20, 32).
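The example is cut off at this point; the comment above describes averaging the activation over its third dimension. A tiny self-contained illustration of that step, using a random array as a stand-in for test[0]:

import numpy as np

activation = np.random.rand(1, 20, 32)             # stand-in for test[0]
attention_vector = np.mean(activation, axis=2).squeeze()
print(attention_vector.shape)                       # -> (20,): one weight per time step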
Example no. 8
    plt.legend()
    plt.show()

    attention_vectors = []
    for i in range(1):
        testing_inputs_1, testing_inputs_2, testing_outputs = get_data_recurrent_1(
            2, i + 1186, TIME_STEPS, INPUT_DIM, NN_FEATURES)
        print("@@@@@@@@@@@@@@")
        print(testing_inputs_1.shape)

        testing_inputs_2 = testing_inputs_2.reshape(1, 13)
        print(testing_inputs_2.shape)
        print(testing_outputs.shape)
        print("@@@@@@@@@@@@@@")
        attention_vector = np.mean(
            get_activations(m, [testing_inputs_1, testing_inputs_2],
                            print_shape_only=True,
                            layer_name='attention_vec')[0],
            axis=2).squeeze()
        # print('attention =', attention_vector)
        # assert (np.sum(attention_vector) - 1.0) < 1e-5
        attention_vectors.append(attention_vector)

    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)

    # attention = [0.49647075 0.49483868 0.49929917 0.5051765  0.47675964 0.48862302
    #              0.48885727 0.48410046 0.4866048  0.47961804]

    # attention = [0.4820545  0.47170776 0.4883446  0.49935806 0.49587426 0.479177
    #              0.4952951  0.47195023 0.49521935 0.51571256]

    list_1 = []