def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    # One single-hidden-layer network per candidate size; alpha=0 disables
    # regularization and random_state=0 makes every run reproducible.
    for hidden_units in (2, 8, 40, 100):
        model = MLPRegressor(solver='lbfgs', max_iter=200,
                             activation='logistic',
                             hidden_layer_sizes=(hidden_units, ),
                             alpha=0, verbose=False,
                             random_state=0)  # zero randomness
        model.fit(x_train, y_train)
        plot_learned_function(hidden_units,
                              x_train, y_train, model.predict(x_train),
                              x_test, y_test, model.predict(x_test))
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model.
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    hidden_units = 50
    # alpha=0 -> no L2 penalty; lbfgs handles this small problem well.
    model = MLPRegressor(activation='logistic', solver='lbfgs',
                         max_iter=5000, alpha=0,
                         hidden_layer_sizes=(hidden_units, ))
    model.fit(x_train, y_train)
    train_prediction = model.predict(x_train)
    test_prediction = model.predict(x_test)
    plot_learned_function(hidden_units, x_train, y_train, train_prediction,
                          x_test, y_test, test_prediction)
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_h = [2, 8, 40]
    for i in n_h:
        # use random state for a fixed split - guarantees always same
        # output (200 seems beautiful)
        nn = MLPRegressor(activation='logistic', solver='lbfgs', alpha=0.0,
                          hidden_layer_sizes=(i,), max_iter=200,
                          random_state=200)
        nn.fit(x_train, y_train)
        pred_train_y = nn.predict(x_train)
        pred_test_y = nn.predict(x_test)
        # Bug fix: pass the current hidden-layer size i, not the whole
        # list n_h, as the neuron count reported by the plot.
        plot_learned_function(i, x_train, y_train, pred_train_y,
                              x_test, y_test, pred_test_y)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    Use max_iter = 10000 and tol=1e-8
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_h = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    n_seeds = 10
    train_array = np.zeros((len(n_h), n_seeds))
    test_array = np.zeros((len(n_h), n_seeds))
    # Improvement: enumerate() replaces the O(n) n_h.index(n) lookup the
    # original performed on every single inner-loop iteration.
    for index, n in enumerate(n_h):
        for i in range(n_seeds):
            nn = MLPRegressor(tol=1e-8, activation='logistic',
                              solver='lbfgs', alpha=0.0,
                              hidden_layer_sizes=(n,), max_iter=10000,
                              random_state=i)
            nn.fit(x_train, y_train)
            train_array[index][i] = calculate_mse(nn, x_train, y_train)
            test_array[index][i] = calculate_mse(nn, x_test, y_test)
            y_pred_train = nn.predict(x_train)
            y_pred_test = nn.predict(x_test)
            # show the learned function only for the 1-neuron network
            if n == 1:
                plot_learned_function(n, x_train, y_train, y_pred_train,
                                      x_test, y_test, y_pred_test)
    plot_mse_vs_neurons(np.array(train_array), np.array(test_array), n_h)
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_hidden = 40  # sizes 2 and 8 were tried as well
    net = MLPRegressor(hidden_layer_sizes=(n_hidden, ),
                       solver="lbfgs",
                       activation="logistic",
                       alpha=0.0,
                       max_iter=200)
    net.fit(x_train, y_train)
    plot_learned_function(n_hidden,
                          x_train, y_train, net.predict(x_train),
                          x_test, y_test, net.predict(x_test))
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    hidden = 40
    net = MLPRegressor(hidden_layer_sizes=(hidden, ), activation='logistic',
                       solver='lbfgs', alpha=0, max_iter=200)
    net.fit(x_train, y_train)
    prediction_train = net.predict(x_train)
    prediction_test = net.predict(x_test)
    plot_learned_function(hidden, x_train, y_train, prediction_train,
                          x_test, y_test, prediction_test)
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_hidden = 5  # 2, 5, 50
    # Bug fix: the original passed hidden_layer_sizes=(n_hidden, 8), which
    # builds TWO hidden layers (5 then 8 neurons).  The exercise asks for a
    # single hidden layer of n_hidden neurons.
    reg = MLPRegressor(hidden_layer_sizes=(n_hidden, ),
                       activation='logistic', solver='lbfgs', alpha=0)
    reg.fit(x_train, y_train)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_hidden, x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
def ex_1_1_d(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 d)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    N = 500
    n_hidden = [2, 5, 50]
    mse_train = np.zeros([np.size(n_hidden), N])
    mse_test = np.zeros([np.size(n_hidden), N])
    for j in range(np.size(n_hidden)):
        # warm_start + max_iter=1 makes every fit() continue training for a
        # single lbfgs iteration, so we can record the MSE curve over time.
        reg = MLPRegressor(hidden_layer_sizes=(n_hidden[j], ),
                           activation='logistic', solver='lbfgs', alpha=0,
                           random_state=0, warm_start=True, max_iter=1)
        for r in range(N):
            reg.fit(x_train, y_train)
            mse_train[j, r] = calculate_mse(reg, x_train, y_train)
            mse_test[j, r] = calculate_mse(reg, x_test, y_test)
    plot_mse_vs_neurons(mse_train, mse_test, n_hidden)
    # (network-size index, iteration index) of the overall minimum test MSE
    ind = np.unravel_index(np.argmin(mse_test), mse_test.shape)
    # Bug fix: retrain with the BEST size n_hidden[ind[0]].  The original
    # reused the loop leftover n_hidden[j] (always the last size, 50) while
    # labelling the plot with n_hidden[ind[0]].
    reg = MLPRegressor(hidden_layer_sizes=(n_hidden[ind[0]], ),
                       activation='logistic', solver='lbfgs', alpha=0,
                       random_state=random.randint(0, 1000), max_iter=500)
    reg.fit(x_train, y_train)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_hidden[ind[0]], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    N = 10
    n_hidden = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    mse_train = np.zeros([np.size(n_hidden), N])
    mse_test = np.zeros([np.size(n_hidden), N])
    for j in range(np.size(n_hidden)):
        for r in range(N):
            # Bug fix 1: hidden_layer_sizes=(1, n_hidden[j]) created TWO
            # hidden layers, the first with a single neuron; the exercise
            # wants one layer of n_hidden[j] neurons.
            # Bug fix 2: build a fresh regressor with a distinct seed per
            # run -- the original drew one seed per size, so all N fits
            # re-initialized identically and produced equal MSE values.
            reg = MLPRegressor(hidden_layer_sizes=(n_hidden[j], ),
                               activation='logistic', solver='lbfgs',
                               alpha=0, random_state=r)
            reg.fit(x_train, y_train)
            mse_train[j, r] = calculate_mse(reg, x_train, y_train)
            mse_test[j, r] = calculate_mse(reg, x_test, y_test)
    plot_mse_vs_neurons(mse_train, mse_test, n_hidden)
    # (row, column) of the smallest test MSE; the row picks the best size
    ind = np.unravel_index(np.argmin(mse_test), mse_test.shape)
    reg = MLPRegressor(hidden_layer_sizes=(n_hidden[ind[0]], ),
                       activation='logistic', solver='lbfgs', alpha=0,
                       random_state=random.randint(0, 1000))
    reg.fit(x_train, y_train)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_hidden[ind[0]], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    Use max_iter = 10000 and tol=1e-8
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    hidden_layers = np.array([1, 2, 3, 4, 6, 8, 12, 20, 40])
    n_seeds = 10  # one MSE column per random initialization
    train_mse = np.zeros((hidden_layers.size, n_seeds))
    test_mse = np.zeros((hidden_layers.size, n_seeds))
    for seed in range(n_seeds):
        for idx in range(hidden_layers.size):
            nn = MLPRegressor(hidden_layer_sizes=(hidden_layers[idx], ),
                              activation='logistic', solver='lbfgs',
                              alpha=0, max_iter=10000, random_state=seed,
                              tol=1e-8)
            nn.fit(x_train, y_train)
            # record train/test MSE for every (size, seed) combination
            train_mse[idx][seed] = calculate_mse(nn, x_train, y_train)
            test_mse[idx][seed] = calculate_mse(nn, x_test, y_test)
    plot_mse_vs_neurons(train_mse, test_mse, hidden_layers)
    # nn is the last fitted network: 40 neurons, final seed
    y_test_pred = nn.predict(x_test)
    y_train_pred = nn.predict(x_train)
    plot_learned_function(40, x_train, y_train, y_train_pred,
                          x_test, y_test, y_test_pred)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    Use max_iter = 10000 and tol=1e-8
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    hidden_neurons_totest = np.array([1, 2, 3, 4, 6, 8, 12, 20, 40])
    n_seeds = 10
    dim1 = hidden_neurons_totest.shape[0]
    mse_test_matrix = np.zeros((dim1, n_seeds))
    mse_train_matrix = np.zeros((dim1, n_seeds))
    for k, n_hidden_neurons in enumerate(hidden_neurons_totest):
        for j in range(n_seeds):
            nn = MLPRegressor(activation='logistic', solver='lbfgs',
                              max_iter=10000, tol=1e-8,
                              hidden_layer_sizes=(n_hidden_neurons,),
                              alpha=0, random_state=j)
            nn.fit(x_train, y_train)
            predictions_test = nn.predict(x_test)
            predictions_train = nn.predict(x_train)
            mse_test_matrix[k, j] = calculate_mse(nn, x_test, y_test)
            mse_train_matrix[k, j] = calculate_mse(nn, x_train, y_train)
    plot_mse_vs_neurons(mse_train_matrix, mse_test_matrix,
                        hidden_neurons_totest)
    plt.show()
    # Bug fix: the original passed the constant 0 as the training
    # predictions, so the training fit was plotted as a flat line; pass
    # the real predictions of the last (40-neuron) network instead.
    plot_learned_function(40, x_train, y_train, predictions_train,
                          x_test, y_test, predictions_test)
    plt.show()
def ex_1_2_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.2 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    alpha_values = np.array([1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2,
                             1e-1, 1, 10, 100])
    n_seeds = 10
    dim1 = alpha_values.shape[0]
    mse_test_matrix = np.zeros((dim1, n_seeds))
    mse_train_matrix = np.zeros((dim1, n_seeds))
    # Cleanup: dropped the unused locals `alpha = i` and a never-read
    # `predictions_test = nn.predict(x_test)` from the original loop body.
    for k, alpha in enumerate(alpha_values):
        for j in range(n_seeds):
            nn = MLPRegressor(activation='logistic', solver='lbfgs',
                              max_iter=200, hidden_layer_sizes=(40,),
                              alpha=alpha, random_state=j)
            nn.fit(x_train, y_train)
            mse_test_matrix[k, j] = calculate_mse(nn, x_test, y_test)
            mse_train_matrix[k, j] = calculate_mse(nn, x_train, y_train)
    plot_mse_vs_alpha(mse_train_matrix, mse_test_matrix, alpha_values)
    plt.show()
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_seeds = 10
    n_neur = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    mse_train = np.zeros([np.size(n_neur), n_seeds])
    mse_test = np.zeros([np.size(n_neur), n_seeds])
    for h in range(np.size(n_neur)):
        for s in range(n_seeds):
            # fresh random initialization for every individual run
            seed = np.random.randint(100)
            reg = MLPRegressor(hidden_layer_sizes=(n_neur[h], ),
                               max_iter=5000, activation='logistic',
                               solver='lbfgs', alpha=0, random_state=seed)
            reg.fit(x_train, y_train)
            mse_train[h, s] = calculate_mse(reg, x_train, y_train)
            mse_test[h, s] = calculate_mse(reg, x_test, y_test)
    plot_mse_vs_neurons(mse_train, mse_test, n_neur)
    # size with the lowest total (equivalently mean) test MSE over seeds
    ind_min = mse_test.sum(axis=1).argmin()
    reg = MLPRegressor(hidden_layer_sizes=(n_neur[ind_min], ),
                       max_iter=5000, activation='logistic',
                       solver='lbfgs', alpha=0,
                       random_state=np.random.randint(100))
    reg.fit(x_train, y_train)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_neur[ind_min], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
class MLPRegressorImpl():
    """Thin adapter exposing the sklearn regressor (SKLModel) through a
    minimal fit/predict interface.  Constructor arguments mirror SKLModel's
    and are stored untouched until fit() instantiates the real model."""

    def __init__(self, hidden_layer_sizes=(100, ), activation='relu',
                 solver='adam', alpha=0.0001, batch_size='auto',
                 learning_rate='constant', learning_rate_init=0.001,
                 power_t=0.5, max_iter=200, shuffle=True, random_state=None,
                 tol=0.0001, verbose=False, warm_start=False, momentum=0.9,
                 nesterovs_momentum=True, early_stopping=False,
                 validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-08, n_iter_no_change=10):
        # Defer model construction to fit(); only record the hyperparameters.
        self._hyperparams = dict(
            hidden_layer_sizes=hidden_layer_sizes,
            activation=activation,
            solver=solver,
            alpha=alpha,
            batch_size=batch_size,
            learning_rate=learning_rate,
            learning_rate_init=learning_rate_init,
            power_t=power_t,
            max_iter=max_iter,
            shuffle=shuffle,
            random_state=random_state,
            tol=tol,
            verbose=verbose,
            warm_start=warm_start,
            momentum=momentum,
            nesterovs_momentum=nesterovs_momentum,
            early_stopping=early_stopping,
            validation_fraction=validation_fraction,
            beta_1=beta_1,
            beta_2=beta_2,
            epsilon=epsilon,
            n_iter_no_change=n_iter_no_change,
        )

    def fit(self, X, y=None):
        """Build a fresh SKLModel from the stored hyperparameters, fit it
        (with targets when given), and return self for chaining."""
        self._sklearn_model = SKLModel(**self._hyperparams)
        if y is None:
            self._sklearn_model.fit(X)
        else:
            self._sklearn_model.fit(X, y)
        return self

    def predict(self, X):
        """Delegate prediction to the model fitted by fit()."""
        return self._sklearn_model.predict(X)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model
    Use max_iter = 10000 and tol=1e-8
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_hidden_neurons_list = [1, 2, 3, 4, 6, 8, 12, 20, 40]
    seeds = 10
    # mse[i][j] stores the [train_mse, test_mse] pair for size i, seed j
    mse = np.zeros((len(n_hidden_neurons_list), seeds, 2))
    for i, size in enumerate(n_hidden_neurons_list):
        for j in range(seeds):
            regressor = MLPRegressor(
                hidden_layer_sizes=(size, ),
                activation='logistic', solver='lbfgs', alpha=0,
                max_iter=10000, random_state=j, tol=1e-8)
            regressor.fit(x_train, y_train)
            mse[i][j] = calculate_mse(regressor, [x_train, x_test],
                                      [y_train, y_test])
    plot_mse_vs_neurons(mse[:, :, 0], mse[:, :, 1], n_hidden_neurons_list)
    # finally show the learned function of a 40-neuron network
    n_hidden = 40
    regressor = MLPRegressor(hidden_layer_sizes=(n_hidden, ),
                             activation='logistic', solver='lbfgs', alpha=0,
                             max_iter=10000, tol=1e-8)
    regressor.fit(x_train, y_train)
    y_pred_train = regressor.predict(x_train)
    y_pred_test = regressor.predict(x_test)
    plot_learned_function(n_hidden, x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
def ex_1_1_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 c)
    Remember to set alpha to 0 when initializing the model.
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    nh = [1, 2, 4, 6, 8, 12, 20, 40]
    n_sizes = 8
    n_seeds = 10
    mse_all_train = np.zeros(shape=(n_sizes, n_seeds))
    mse_all_test = np.zeros(shape=(n_sizes, n_seeds))
    for i in range(n_seeds):
        for j in range(n_sizes):
            # draw a new seed so each run starts from independent weights
            seed = np.random.randint(1, 100)
            nn = MLPRegressor(activation='logistic', solver='lbfgs',
                              max_iter=5000, alpha=0,
                              hidden_layer_sizes=(nh[j], ),
                              random_state=seed)
            nn.fit(x_train, y_train)
            mse_all_train[j][i] = calculate_mse(nn, x_train, y_train)
            mse_all_test[j][i] = calculate_mse(nn, x_test, y_test)
    plot_mse_vs_neurons(mse_all_train, mse_all_test, nh)
    # retrain with nh[2] (= 4) hidden neurons and show the learned function
    nn = MLPRegressor(activation='logistic', solver='lbfgs', max_iter=5000,
                      alpha=0, hidden_layer_sizes=(nh[2], ))
    nn.fit(x_train, y_train)
    y_pred_train = nn.predict(x_train)
    y_pred_test = nn.predict(x_test)
    plot_learned_function(nh[2], x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
def main(test_X, test_y):
    """Fit an MLP on the module-level training data, predict on test_X,
    then append the de-normalized MAE and RMSE to the global result lists.

    NOTE(review): relies on globals train_X, train_y, scaler, TIME_STEPS,
    INPUT_DIMS, N_TRAIN_WEEKS, mae_array, rmse_array defined elsewhere."""
    model = MLPRegressor(hidden_layer_sizes=24, learning_rate_init=0.1,
                         max_iter=500)
    model.fit(train_X, train_y)
    # prediction step
    yhat = model.predict(test_X)
    # flatten the time dimension before inverting the normalization
    test_X = test_X.reshape((test_X.shape[0], TIME_STEPS * INPUT_DIMS))
    inv_yhat = inverse_trans(yhat, test_X, scaler, N_TRAIN_WEEKS, INPUT_DIMS)
    inv_y = inverse_trans(test_y, test_X, scaler, N_TRAIN_WEEKS, INPUT_DIMS)
    mae_array.append(mean_absolute_error(inv_y, inv_yhat))
    rmse_array.append(sqrt(mean_squared_error(inv_y, inv_yhat)))
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    n_hidden = 40
    # random_state pins the weight initialization for reproducibility
    net = MLPRegressor(hidden_layer_sizes=(n_hidden, ),
                       activation='logistic', solver='lbfgs', alpha=0,
                       max_iter=200, random_state=1000)
    net = net.fit(x_train, y_train)
    prediction_train = net.predict(x_train)
    prediction_test = net.predict(x_test)
    plot_learned_function(n_hidden, x_train, y_train, prediction_train,
                          x_test, y_test, prediction_test)
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    for n_hidden_neurons in [2, 8, 40]:
        nn = MLPRegressor(activation='logistic', solver='lbfgs',
                          max_iter=200,
                          hidden_layer_sizes=(n_hidden_neurons,), alpha=0)
        nn.fit(x_train, y_train)
        predictions_test = nn.predict(x_test)
        # Bug fix: the original passed the constant 0 as the training
        # predictions; plot the network's actual training-set predictions.
        predictions_train = nn.predict(x_train)
        plot_learned_function(n_hidden_neurons, x_train, y_train,
                              predictions_train, x_test, y_test,
                              predictions_test)
        plt.show()
def ex_1_2_c(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.2 c)
    Train in 20-iteration chunks (warm_start) to record the training,
    validation and test MSE throughout optimization, then report the seed
    whose validation error reached the overall minimum.
    :param x_train:
    :param x_test:
    :param y_train:
    :param y_test:
    :return:
    """
    total_iter = 10000
    epoch_iter = 20              # lbfgs iterations per recorded "epoch"
    epochs = total_iter // epoch_iter
    n_neuro = 6
    n_seeds = 10
    # hold out a third of the training data as a validation set
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train,
                                                      test_size=0.33)
    mse_train = np.zeros([n_seeds, epochs])
    mse_val = np.zeros([n_seeds, epochs])
    mse_test = np.zeros([n_seeds, epochs])
    seeds = np.zeros(n_seeds)
    for s in range(n_seeds):
        seed = s
        seeds[s] = seed
        # warm_start + small max_iter -> every fit() continues training,
        # letting us sample the three error curves during optimization
        reg = MLPRegressor(hidden_layer_sizes=(n_neuro, ),
                           activation='logistic', solver='lbfgs',
                           alpha=1e-3, random_state=seed, warm_start=True,
                           max_iter=epoch_iter)
        for ep in range(epochs):
            reg.fit(x_train, y_train)
            mse_train[s, ep] = calculate_mse(reg, x_train, y_train)
            mse_val[s, ep] = calculate_mse(reg, x_val, y_val)
            mse_test[s, ep] = calculate_mse(reg, x_test, y_test)
    y_pred_test = reg.predict(x_test)
    y_pred_train = reg.predict(x_train)
    plot_learned_function(n_neuro, x_train, y_train, y_pred_train,
                          x_test, y_test, y_pred_test)
    # Bug fixes: removed a leftover `import pdb; pdb.set_trace()` that
    # halted execution, and a bare `error_min` expression that raised
    # NameError at runtime.
    min_val_index = np.unravel_index(mse_val.argmin(), mse_val.shape)
    error_min_seed = seeds[min_val_index[0]]
    print('Seed: ', error_min_seed)
# Bug fix: removed leftover merge-conflict residue ("=======",
# ">>>>>>> master") that made the file unparseable, and dropped the
# duplicated `scale` import.
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale

FILENAME = '../../data/files/anninputs/nonnormalizedinputs/01_6.csv'
a = pd.read_csv(FILENAME)

# standardize the temperature column used as the network input
in_a_tsa = np.array(scale(a['tsa_09'].values.reshape(-1, 1)))
test_a_tsa = np.array(scale(a['tsa_09'].values.reshape(-1, 1)))
print(in_a_tsa)
a['tsa_10'] = scale(a['tsa_10'])

# scale the target with StandardScaler so predictions can be inverted later
std = StandardScaler()
x = std.fit_transform(a['rainfall_01'].values.reshape(-1, 1))
print('x', x)

nn = MLPRegressor()
print(len(in_a_tsa), len(x))
nn.fit(in_a_tsa, x.ravel())
y = pd.DataFrame(nn.predict(test_a_tsa))
print(std.inverse_transform(y.values.reshape(-1, 1)))
# Encode the team columns and attach them as numeric features.
home = pd.DataFrame(le.transform(X[['HOME']]))
away = pd.DataFrame(le.transform(X[['AWAY']]))
X = pd.concat([home, away, X], axis=1)
X = X.drop(['HOME', 'AWAY', 'DATE_VALUE'], axis=1)

# row-index split: later rows train, earlier rows test
X_train = X.iloc[298:591]
X_test = X.iloc[259:297]
y_train = y.iloc[298:591]
y_test = y.iloc[259:297]

regressor = MLPRegressor(hidden_layer_sizes=(10, 5), solver='sgd',
                         max_iter=2000)
regressor.fit(X_train, y_train.squeeze().tolist())
print(regressor.score(X_train, y_train.squeeze().tolist()))
print(regressor.score(X_test, y_test.squeeze().tolist()))
print(regressor.get_params())

# scatter predicted vs. actual outcomes
y_predict = regressor.predict(X_test)
plt.plot(y_test.squeeze().tolist(), y_predict, 'o')
plt.show()
# Build training features: format/scale the raw frame and keep the 50
# most informative columns as chosen by the project's formatter.
formatter = dtFrm.LANLDataFormatter(data_df=data_df, data_type='train', doTransform=True, doScale=True, cols_to_keep=50)
data_df = formatter.transform()
most_dependent_columns = formatter.getMostImpCols()
# data_df = data_df.drop(['acc_max','acc_min','chg_acc_max','chg_acc_min'],axis=1)
# Splitting data into test_random_forest and train
# train_set, test_set = train_test_split(data_df, test_size=0.2, random_state=np.random.randint(1, 1000))
# Separate output from inputs
y_train = data_df['time_to_failure']
x_train_seg = data_df['segment_id']
x_train = data_df.drop(['time_to_failure', 'segment_id'], axis=1)
# round the regression targets to 2 decimals before fitting
y_train = np.around(y_train.values, decimals=2)
# mlpReg = MLPRegressor(verbose=True, tol=0.0001, max_iter=200000, n_iter_no_change=10000, hidden_layer_sizes=(200,))
mlpReg = MLPRegressor(verbose=True, max_iter=1000)
mlpReg.fit(x_train, y_train)
# Create an variable to pickle and open it in write mode
mh = ModelHolder(mlpReg, most_dependent_columns)
mh.save('mlp_regression.model')
# Drop the in-memory model, then reload from disk to verify the pickled
# artifact round-trips.
# NOTE(review): loads from `model_name`, not the 'mlp_regression.model'
# just saved -- confirm both refer to the same file.
mlpReg = None
mh_new = load_model(model_name)
mlpReg, most_dependent_columns = mh_new.get()
# Evaluation is on the TRAINING data -- there is no held-out set here.
y_pred = mlpReg.predict(x_train)
# y_pred = pd.Series(y_pred).apply(lambda x: float(x / 10))
print('MAE for Multi Layer Perceptron', mean_absolute_error(y_train, y_pred))
def ex_1_1_a(x_train, x_test, y_train, y_test):
    """
    Solution for exercise 1.1 a)
    Remember to set alpha to 0 when initializing the model
    :param x_train: The training dataset
    :param x_test: The testing dataset
    :param y_train: The training targets
    :param y_test: The testing targets
    :return:
    """
    # shared regressor settings (alpha=0 disables regularization)
    activation_mode = 'logistic'
    solver_mode = 'lbfgs'
    alpha = 0
    max_iter = 200
    # Refactor: the original duplicated the build/fit/predict/plot sequence
    # verbatim for 2, 8 and 40 neurons; this loop produces the same three
    # plots in the same order.
    for hidden_layer in (2, 8, 40):
        nn = MLPRegressor(hidden_layer_sizes=(hidden_layer, ),
                          activation=activation_mode, solver=solver_mode,
                          alpha=alpha, max_iter=max_iter)
        nn.fit(x_train, y_train)
        y_test_pred = nn.predict(x_test)
        y_train_pred = nn.predict(x_train)
        plot_learned_function(hidden_layer, x_train, y_train, y_train_pred,
                              x_test, y_test, y_test_pred)
df1 = pd.DataFrame({'x': x, 'y': y, 'z': z})

a = [random.random() for i in range(10)]
b = [random.random() for i in range(10)]
c = [random.random() for i in range(10)]
# Bug fix: df2 was built from x/y/z, silently discarding the freshly drawn
# a/b/c samples and making both models train on identical data.
df2 = pd.DataFrame({'a': a, 'b': b, 'c': c})

mlp1 = MLPRegressor(solver='lbfgs', early_stopping=True)
mlp2 = MLPRegressor(solver='lbfgs', early_stopping=True)
# first 8 rows train, last 2 rows test
mlp1 = mlp1.fit(df1[['x', 'y']][0:8], df1['z'][0:8])
mlp2 = mlp2.fit(df2[['a', 'b']][0:8], df2['c'][0:8])
result1 = mlp1.predict(df1[['x', 'y']][8:10])
result2 = mlp2.predict(df2[['a', 'b']][8:10])

# hand-rolled MAPE for each model
dif1 = abs(df1['z'][8:10] - abs(result1)) / df1['z'][8:10]
mymape1 = 100 / len(result1) * dif1.sum()
dif2 = abs(df2['c'][8:10] - abs(result2)) / df2['c'][8:10]
mymape2 = 100 / len(result2) * dif2.sum()
print('mymape1', mymape1)
print('mymape2', mymape2)

mae1 = mae(df1['z'][8:10], result1)
mape1 = 100 * mae1