Example #1
import os
import time

import numpy as np
import tensorflow as tf

# `model`, the train/test arrays (`X_train`, `Y_train`, `X_test`, `Y_test`)
# and the quantisation position `in_pos` are assumed to be defined earlier in
# the surrounding script.
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.summary()
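
# `quanti_convert_float_to_int16` is used further down but not defined in this
# snippet. A minimal sketch of such a fixed-point helper (an assumption, not
# the original implementation), where `pos` is the number of fractional bits:
def quanti_convert_float_to_int16(data, pos):
    # scale by 2**pos, round, and saturate to the int16 range
    scaled = np.round(data * (2 ** pos))
    return np.clip(scaled, -32768, 32767).astype(np.int16)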

filename = os.path.join(current_dir, 'data', 'complain_model.h5')
is_training = False
if is_training:
    model.fit(X_train, Y_train, validation_data=(
        X_test, Y_test), epochs=20, batch_size=64)

    # Evaluate the model
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print("Evaluation Accuracy: %.2f%%" % (scores[1]*100))
    model.save(filename, save_format='h5')
else:
    model.load_weights(filename)

t1 = time.time()

# Truncate the trained model at the max_pooling1d layer; its float output is
# quantized below and used as the input of the LSTM stage.
lstm_upstream = tf.keras.Model(
    inputs=model.input, outputs=model.get_layer('max_pooling1d').output)
lstm_input = lstm_upstream.predict(X_test, batch_size=8)
# print(lstm_input.shape)

num_records = lstm_input.shape[0]
quantized_lstm_input = quanti_convert_float_to_int16(
    lstm_input.reshape(num_records * 25*32), in_pos).reshape((num_records, 25*32))
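# pre-allocated int16 result buffer: 25 time steps x 100 values per record
# (presumably the width of the LSTM stage that follows)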
lstm_output = np.zeros((num_records, 25*100), dtype=np.int16)


runner_idx = count = 0
Example #2
import csv

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, LSTM

# `num_words` and `pad_x_preds` are assumed to be defined earlier in the
# surrounding script.
embedding_vector_length = 200
model = Sequential()
model.add(
    Embedding(num_words + 1,
              embedding_vector_length,
              input_length=pad_x_preds.shape[1],
              mask_zero=True))
model.add(LSTM(512))
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
print("Loading weights")
model.load_weights('./Models/lstm_full1_checkpoint.hdf5')
print("Weights loaded")

#-------------------------------------------------------------------------------------------------------------------
#PREDICTION
print("Start prediction")
y_pred = model.predict(pad_x_preds)
idx_test, predicted = reformat(y_pred.squeeze())
print("Prediction done")

submission_path = './Models/lstm_simplefc_full.csv'
with open(submission_path, 'w+', newline='') as csvfile:
    fieldnames = ['Id', 'Prediction']
    writer = csv.DictWriter(csvfile, delimiter=",", fieldnames=fieldnames)
    writer.writeheader()
    for r1, r2 in zip(idx_test, predicted):
        # the loop body is cut off in the original; writing one row per
        # prediction is the obvious completion
        writer.writerow({'Id': r1, 'Prediction': r2})

Example #3

import datetime

# The top of this snippet is cut off; `mlp`, `mode`, `constants`, `start`,
# `validate`, `predict`, `file_name` and the hyper-parameters logged below all
# come from the surrounding script. The `elif mode == constants.PRED:` further
# down implies a training branch roughly like the following (a reconstruction,
# not the original code):
if mode == constants.TRAIN:  # hypothetical branch name
    # saver.save(sess, 'ckpt/sae.ckpt', global_step=epoch)

    # pred = mlp.predict(h
    # print(np.mean(np.abs(pred-train_y[-DATA_A_DAY:])))
    mae = validate(mlp)
    predict(mlp)

    end = datetime.datetime.now()

    rcd = str(end) + '\n'
    rcd += "lr: " + str(lr) + '\n'
    rcd += "batch_size: " + str(batch_size) + '\n'
    rcd += "l2_param: " + str(l2_param) + '\n'
    rcd += "dropout: " + str(dropout) + '\n'
    rcd += "training_epochs: " + str(training_epochs) + '\n'
    rcd += "n_inputs: " + str(n_inputs) + '\n'
    rcd += "n_outputs: " + str(n_outputs) + '\n'
    rcd += "n_mlp: " + str(n_mlp) + '\n'
    rcd += "mae: " + str(mae) + '\n'
    rcd += "time: " + str(end - start) + '\n' + '\n' + '\n'
    print(rcd)
    with open(constants.DATA_PATH_RESULT + "mlp_result", "a") as log_file:
        log_file.write(rcd)

    # mlp.save('ckpt/'+file_name[:-3]+'/'+file_name)

elif mode == constants.PRED:

    mlp.load_weights(file_name)
    predict(mlp)
Example #4
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import mean_absolute_error, mean_squared_error

# The head of this snippet is cut off: the two lines below are the trailing
# arguments of a model.compile(...) call. `model`, `adam`, `weighs_path`,
# `x_te`, `y_te`, `pred_value_station` and `true_value_station` come from the
# surrounding script.
              optimizer=adam,
              metrics=['mean_squared_error'])
# print(model.summary())
early_stopping = EarlyStopping(monitor="mean_squared_error",
                               patience=15,
                               mode='min')
model_checkpoint = ModelCheckpoint(weighs_path,
                                   monitor="mean_squared_error",
                                   verbose=2,
                                   save_best_only=True,
                                   mode='min')
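
# NOTE: the fit() call below is commented out; as written it only passed
# model_checkpoint as a callback, so early_stopping above is defined but
# never actually used.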

# model.fit(x3_train, y3_train, epochs=300, batch_size=64, verbose=2, callbacks=[model_checkpoint])
# model.save_weights("F4.h5", overwrite=True)

model.load_weights(weighs_path)
# calculate predictions
pred_data = model.predict(np.array(x_te)).reshape(-1)
real_data = np.array(y_te).reshape(-1)

# de-normalise the first 270 points (393.1 is presumably the scale factor used
# when the data was normalised)
total = 270
a = pred_data[:total] * 393.1
b = real_data[:total] * 393.1
np.save(pred_value_station, a)
np.save(true_value_station, b)
MAE = mean_absolute_error(b, a)
MSE = mean_squared_error(b, a)
print("the mae is:", MAE)
print("the rmse is:", np.sqrt(MSE))