Example #1
0
    # NOTE(review): fragment — the enclosing function's `def` line is outside
    # this view; `mlpr`, `model_db`, `data` and `serial_num` are presumably
    # bound there. Persist the trained MLP regressor under a serial-numbered
    # pickle path, then record it and its training data in the model database.
    fname_model = "pickled_files/models/mlp_regression_" + str(
        serial_num) + ".pkl"
    mlpr.dump(fname_model)
    # Hash of the dumped model file, stored for provenance/lookup.
    hash_model = model_db.find_hash(fname_model)
    model_db.store_cur_data([hash_model], columns=['model_hash'])

    # Store the data that trained the model
    data.dump_X(serial_num=serial_num)
    data.dump_Y(serial_num=serial_num)

    # Store all the data in the data db and dump the db
    model_db.store_data(serial_num)
    model_db.dump()

    # Analyze the model
    mlpr_acc = mlpr.evaluate()
    print(mlpr_acc)

    # Make predictions using the testing set
    pred_y = mlpr.predict(data.test_x)

    # The coefficients
    # print('Coefficients: \n', mlpr.model.coefs_)
    # The mean squared error
    print("Mean squared error: %.2f" % mean_squared_error(data.test_y, pred_y))
    # Explained variance score: 1 is perfect prediction
    print('Variance score: %.2f' % r2_score(data.test_y, pred_y))

    # Plot outputs
    x = []
    # NOTE(review): the loop body is truncated at this point in the source.
    for i, feat_vec in enumerate(data.test_x):
Example #2
0
File: test.py  Project: h83s/load_forecast

# create model
print('creating model')

# Single-hidden-layer MLP: 1 input feature -> 100 ReLU units -> 24 sigmoid
# outputs (presumably one per hour of the day — TODO confirm against caller).
model = Sequential()
# BUG FIX: `init=` and `nb_epoch=` are Keras 1 keyword names that were removed
# in Keras 2; current Keras raises TypeError on them. The Keras 2 equivalents
# are `kernel_initializer=` and `epochs=`.
model.add(Dense(100, input_dim=1, kernel_initializer='uniform', activation='relu'))
model.add(Dense(24, kernel_initializer='uniform', activation='sigmoid'))
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])

# training
print('Training')

# 70/30 train/validation split on the shuffled data; verbose=2 prints one line
# per epoch.
model.fit(X, load_dh, batch_size=10, epochs=10000, verbose=2, validation_split=0.3, shuffle=True)

# Final in-sample evaluation; scores is [loss, mse-metric].
scores = model.evaluate(X, load_dh)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]))



# Multilayer Perceptron to Predict International Airline Passengers (t+1, given t, t-1, t-2)
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
	# Slide a window of `look_back` consecutive samples over the series;
	# each window becomes one input row (dataX) and, presumably, the sample
	# that follows it becomes the target (dataY) — the loop body is truncated
	# in this view of the source, so the exact append logic is not visible.
	dataX, dataY = [], []
	for i in range(len(dataset)-look_back-1):
Example #3
0
    # Initializing the model
    model = Sequential()
    model.add(Dense(150, input_dim=13, activation="relu"))
    model.add(Dense(200, activation="tanh"))
    model.add(Dense(120, activation="tanh"))
    model.add(Dense(200, activation="tanh"))
    model.add(Dense(num_of_classes, activation="softmax"))
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])
    return model


# Build the MLP (the comment in the original said "cnn", but design_mlp
# constructs a fully-connected network) and fit it on the training split.
model = design_mlp()
model.fit(x=x_train, y=y_train, batch_size=500, epochs=5)

# Report accuracy (index 1 of the evaluate() score list) on both splits.
# Test split is evaluated verbosely, train split silently.
test_scores = model.evaluate(x_test, y_test, verbose=1)
print("Accuracy: %.3f%%" % (test_scores[1] * 100))

train_scores = model.evaluate(x_train, y_train, verbose=0)
print("Accuracy: %.3f%%" % (train_scores[1] * 100))
#######################################END###################################
#######################################END###################################
Example #4
0
    # NOTE(review): fragment — the enclosing `def` line is outside this view;
    # `df_train`, `df_test`, `all_columns`, `sequential_nn_model` and
    # `transform_list_item` are presumably bound there.
    train_columns = ['season', 'month', 'hour', 'holiday', 'weekday', 'workingday', 'weather', 'temp', 'humidity']
    # Keep every (possibly one-hot-expanded) column whose name starts with one
    # of the base feature names.
    X = df_train[[x for x in all_columns if x.startswith(tuple(train_columns))]]  # getting all desired
    # print(X)
    # weather_4 is dropped — presumably absent or constant in the test data;
    # TODO confirm against the dataset.
    X = X.drop(columns=['weather_4'])
    print(X.columns)
    y = df_train['count']

    # Creating the split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

    # Initializing MLPRegressor
    neural = MLPRegressor(hidden_layer_sizes=(100, 60, 40, 20), activation='relu', solver='lbfgs', alpha=0.0001,
                          verbose=True)
    neural.fit(X_train, y_train)

    # BUG FIX: sklearn estimators have no Keras-style evaluate() method — the
    # original `_, test_acc = neural.evaluate(...)` raises AttributeError.
    # score() returns the R^2 of the predictions as a single float.
    test_acc = neural.score(X_test, y_test)  # evaluating MLPRegressor
    print('Test: %.3f' % test_acc)

    # Initializing Sequential NN
    model = sequential_nn_model(X_train, y_train)

    # Align the test frame with the training columns, then predict.
    df_test['weather_4'] = 0
    df_test = df_test[[x for x in all_columns if x.startswith(tuple(train_columns))]]
    df_test = df_test.drop(columns=['weather_4'])
    print(df_test.columns)
    test_array = df_test.to_numpy()
    predictions = model.predict(test_array)
    individual_predictions = [transform_list_item(x) for x in predictions]
    # Clamp negative predicted counts to zero. Loop kept byte-identical: the
    # source is truncated here and the loop body may continue past this view.
    for i, y in enumerate(individual_predictions):
        if individual_predictions[i] < 0:
            individual_predictions[i] = 0