Example #1

def model_attention_applied_before_lstm():
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    attention_mul = attention_3d_block(inputs)
    lstm_units = 32
    attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    model = Model(inputs=[inputs], outputs=output)
    return model


if __name__ == '__main__':

    N = 300000
    # N = 300 -> too few = no training
    inputs_1, outputs = get_data_recurrent(N, TIME_STEPS, INPUT_DIM)

    if APPLY_ATTENTION_BEFORE_LSTM:
        m = model_attention_applied_before_lstm()
    else:
        m = model_attention_applied_after_lstm()

    m.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=1, batch_size=64, validation_split=0.1)

    attention_vectors = []
    for i in range(300):
        testing_inputs_1, testing_outputs = get_data_recurrent(1, TIME_STEPS, INPUT_DIM)
        # Average the attention layer's activations over the input dimension for one
        # test sample; the layer name and the get_activations arguments are assumptions.
        attention_vector = np.mean(get_activations(m,
                                                   testing_inputs_1,
                                                   layer_name='attention_vec')[0],
                                   axis=2).squeeze()
        attention_vectors.append(attention_vector)
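None of these examples shows the attention_3d_block helper they all call. A minimal sketch of one common version (a feed-forward soft-attention block in the style of the keras-attention-mechanism example; the TIME_STEPS and SINGLE_ATTENTION_VECTOR constants are placeholders for whatever the surrounding code defines) could look like this:

from keras.layers import Dense, Lambda, Multiply, Permute, RepeatVector
import keras.backend as K

TIME_STEPS = 20                  # assumed value; the examples define it elsewhere
SINGLE_ATTENTION_VECTOR = False  # assumed flag: share one attention vector across features

def attention_3d_block(inputs):
    # inputs has shape (batch, TIME_STEPS, input_dim)
    input_dim = int(inputs.shape[2])
    a = Permute((2, 1))(inputs)                     # (batch, input_dim, TIME_STEPS)
    a = Dense(TIME_STEPS, activation='softmax')(a)  # attention weights over the time axis
    if SINGLE_ATTENTION_VECTOR:
        # share a single attention vector across all input dimensions
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    # element-wise weighting of the inputs by the attention probabilities
    return Multiply(name='attention_mul')([inputs, a_probs])

The get_activations(m, ...) call in the loop above is not shown either; in the original example family it is a small helper that runs the model and returns the outputs of the requested layer (here the layer named 'attention_vec'), so the learned weights can be inspected.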

Example #2

def model_attention_applied_before_lstm():
    inputs = Input(shape=(
        TIME_STEPS,
        INPUT_DIM,
    ))
    attention_mul = attention_3d_block(inputs)
    attention_mul = LSTM(LSTM_UNITS, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    model = Model(inputs=[inputs], outputs=output)
    return model


if __name__ == '__main__':

    N = 300000
    # N = 300 -> too few = no training
    inputs_1, outputs, attention_columns = get_data_recurrent(
        N, TIME_STEPS, INPUT_DIM, 5)

    # for debug
    for ntest in [0, 1]:
        print(len(inputs_1[ntest]), inputs_1[ntest], inputs_1[ntest][10])
        print(outputs[ntest])
        print(attention_columns[ntest])

    if APPLY_ATTENTION_BEFORE_LSTM:
        m = model_attention_applied_before_lstm()
    else:
        m = model_attention_applied_after_lstm()

    m.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
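The data generator is also not part of the snippet. Example #2 expects a get_data_recurrent variant that additionally returns the indices of the "attention" time steps; a hypothetical toy generator with that signature (every name and the label-copying trick below are assumptions) might be:

import numpy as np

def get_data_recurrent(n, time_steps, input_dim, n_attention_columns=1):
    # Random inputs and binary labels; the label value is copied into a few randomly
    # chosen time steps, so a well-trained attention layer should focus on exactly
    # those steps.
    x = np.random.standard_normal(size=(n, time_steps, input_dim))
    y = np.random.randint(low=0, high=2, size=(n, 1))
    attention_columns = np.random.randint(low=0, high=time_steps,
                                          size=(n, n_attention_columns))
    for i in range(n):
        x[i, attention_columns[i], :] = y[i]
    return x, y, attention_columns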

Example #3

def model_attention_applied_before_lstm():
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    attention_mul = attention_3d_block(inputs)
    lstm_units = 32
    attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    model = Model(inputs=[inputs], outputs=output)
    return model


if __name__ == '__main__':

    N = 300000
    # N = 300 -> too few = no training
    inputs_1, inputs_2, outputs, primary_data, scaler = get_data_recurrent(
        N, TIME_STEPS, INPUT_DIM, NN_FEATURES)
    print(inputs_2.shape)
    if not_use_LSTM:
        m = model_ANN()
    else:
        m = model_attention_applied_after_lstm()  # the variant currently in use
    adam = Adam(lr=1.e-4)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
    m.compile(optimizer='adam', loss='mse', metrics=['mse'])  # rmsprop

    # m.compile(optimizer='sgd', loss=myloss, metrics=['mae'])

    last_for_train = 500  # 500
    print(m.summary())
    # 16 - 8 : 0.8
    # 12 - 8
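Note that the compile call in Example #3 uses the optimizer string 'adam', so the Adam(lr=1.e-4) instance and the reduce_lr callback have no effect until they are actually passed to compile() and fit(). The training call itself is not part of the snippet; a hypothetical continuation (the input list, epoch count and batch size are assumptions) would wire them in like this:

m.compile(optimizer=adam, loss='mse', metrics=['mse'])
history = m.fit([inputs_1, inputs_2], outputs,
                epochs=100,
                batch_size=64,
                validation_split=0.1,
                callbacks=[reduce_lr])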