Code example #1
def train(data):
    X = np.asarray(data.drop(['ETA'], axis=1))
    y = np.asarray(data["ETA"])
    scaler = MinMaxScaler()
    X = scaler.fit_transform(X)
    with open("han_bike_scalers.pkl", "wb") as outfile:
        pkl.dump(scaler, outfile)
        upload_to_bucket('model/han_bike_scalers.pkl', 'han_bike_scalers.pkl',
                         'aha-ds-ml-pipeline')
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=0)
    model = KerasRegressor(build_fn=baseline_model,
                           epochs=2,
                           batch_size=3,
                           verbose=1)
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        callbacks=[telegram_callback])
    #==============================================================================
    # Predict & Evaluation
    #==============================================================================
    prediction = model.predict(X_test)
    score = mean_absolute_error(y_test, prediction)
    # persist and upload the model only when the MAE is below the acceptance threshold
    if score < 5:
        model.model.save('han_bike_models.h5')
        upload_to_bucket('model/han_bike_models.h5', 'han_bike_models.h5',
                         'aha-ds-ml-pipeline')
    return model
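The inference-side counterpart of the save/upload above, as a minimal sketch; it assumes the two artifacts were fetched back from the bucket into the working directory (the download helper is not shown in the source), and X_new is a hypothetical placeholder for new, unscaled feature rows:

import pickle as pkl
from tensorflow.keras.models import load_model

# Restore the scaler and model persisted by train()
with open("han_bike_scalers.pkl", "rb") as infile:
    scaler = pkl.load(infile)
model = load_model("han_bike_models.h5")

# Apply the same scaling used at training time, then predict ETA
X_new_scaled = scaler.transform(X_new)  # X_new: hypothetical new feature rows
eta_pred = model.predict(X_new_scaled)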
Code example #2
    def createModel(self):
        X = self.df[list(self.predictor_list.get(0, tk.END))].to_numpy()
        y = self.df[self.target_list.get(0)].to_numpy().reshape(-1)

        layers = self.no_optimization_choice_var.get()

        learning_rate = self.hyperparameters[4].get()
        momentum = self.hyperparameters[5].get()

        optimizers = {
            "Adam": Adam(learning_rate=learning_rate),
            "SGD": SGD(learning_rate=learning_rate, momentum=momentum),
            "RMSprop": RMSprop(learning_rate=learning_rate, momentum=momentum)
        }

        def base_model():
            model = Sequential()

            for i in range(layers):
                neuron_number = self.neuron_numbers_var[i].get()
                activation = self.activation_var[i].get()
                if i == 0:
                    model.add(
                        Dense(neuron_number,
                              activation=activation,
                              input_dim=X.shape[1]))
                else:
                    model.add(Dense(neuron_number, activation=activation))

            model.add(Dense(1, activation="relu"))
            model.compile(optimizer=optimizers[self.hyperparameters[2].get()],
                          loss=self.hyperparameters[3].get())
            return model

        do_forecast = self.do_forecast_option.get()
        val_option = self.validation_option.get()

        # val_option: 0 = train on all data, 1 = random train/test split,
        # 2 = k-fold cross-validation, 3 = leave-one-out-style cross-validation
        if val_option == 0 or val_option == 1:
            model = base_model()
        elif val_option == 2 or val_option == 3:
            model = KerasRegressor(build_fn=base_model,
                                   epochs=self.hyperparameters[0].get(),
                                   batch_size=self.hyperparameters[1].get())

        if val_option == 0:
            model.fit(X,
                      y,
                      epochs=self.hyperparameters[0].get(),
                      batch_size=self.hyperparameters[1].get())
            if do_forecast == 0:
                pred = model.predict(X).reshape(-1)
                losses = loss(y, pred)[:-1]
                self.y_test = y
                self.pred = pred
                for i, j in enumerate(losses):
                    self.test_metrics_vars[i].set(j)
            self.model = model

        elif val_option == 1:
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, train_size=self.random_percent_var.get() / 100)
            model.fit(X_train,
                      y_train,
                      epochs=self.hyperparameters[0].get(),
                      batch_size=self.hyperparameters[1].get())
            if do_forecast == 0:
                pred = model.predict(X_test).reshape(-1)
                losses = loss(y_test, pred)[:-1]
                self.y_test = y_test.reshape(-1)
                self.pred = pred
                for i, j in enumerate(losses):
                    self.test_metrics_vars[i].set(j)
            self.model = model

        elif val_option == 2:
            cvs = cross_validate(model,
                                 X,
                                 y,
                                 cv=self.cross_val_var.get(),
                                 scoring=skloss)
            for i, j in enumerate(list(cvs.values())[2:]):
                self.test_metrics_vars[i].set(j.mean())

        elif val_option == 3:
            cvs = cross_validate(model,
                                 X,
                                 y,
                                 cv=X.shape[0] - 1,
                                 scoring=skloss)
            for i, j in enumerate(list(cvs.values())[2:]):
                self.test_metrics_vars[i].set(j.mean())
Code example #3
File: main.py Project: NJCinnamond/AMLFinalExam
    # compile the keras model
    model.compile(loss='mse', optimizer='adam')
    return model


estimator = KerasRegressor(build_fn=baseline_model,
                           epochs=20,
                           batch_size=1000,
                           verbose=1)

# fit the keras model on the dataset
estimator.fit(X_train, y_train, epochs=20, batch_size=1000, verbose=1)

# make predictions with the model
predictions = estimator.predict(X_test)

top_20_count = 0
correct_count = 0
pred_list = np.zeros((len(predictions), 2))
for i in range(len(predictions)):
    pred_list[i][0] = i
    pred_list_temp = np.zeros((len(y_test), 2))

    lowest_mse_index = None
    lowest_cur_mse = float('inf')

    for j in range(len(y_test)):
        # Euclidean distance between prediction i and target j
        cur_mse = np.linalg.norm(predictions[i] - y_test[j])

        if cur_mse < lowest_cur_mse:
            lowest_cur_mse = cur_mse
            lowest_mse_index = j
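The nearest-target search above can also be done in one shot with broadcasting; a minimal sketch, assuming predictions and y_test are 2-D NumPy arrays with the same number of columns:

import numpy as np

# Pairwise Euclidean distances: dists[i, j] = ||predictions[i] - y_test[j]||
dists = np.linalg.norm(predictions[:, None, :] - y_test[None, :, :], axis=-1)

# Index of the closest target for every prediction at once
lowest_mse_indices = dists.argmin(axis=1)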
Code example #4
                    kernel_regularizer=regularizer))
    model.add(LeakyReLU(alpha=0.05))
    model.add(BatchNormalization())
    # hidden layer: 5 nodes by default
    for l in range(layers):
        #model.add(Dense(l, activation=activation, kernel_regularizer=regularizer))
        model.add(Dense(nodes, kernel_regularizer=regularizer))
        model.add(LeakyReLU(alpha=0.05))
        model.add(BatchNormalization())
        #model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    #model.add(Dense(1, activation=activation))
    model.compile(loss="binary_crossentropy",
                  optimizer='adam',
                  metrics=['AUC'])
    return model


#model=KerasClassifier(build_fn=create_model, verbose=1)
model = KerasRegressor(build_fn=create_model, verbose=1)  # build the model (note: the network is compiled for binary classification but wrapped as a regressor)
#result=model.fit(train, y_train, validation_split=0.1, epochs=best_params['epochs']) # train the model
result = model.fit(train, y_train, epochs=best_params['epochs'])

model.model.save("models/keras_model_" + outDir + ".h5")  # save the output

#plot the performance of the model
y_train_pred = model.predict(train)
y_test_pred = model.predict(test)

make_plots('keras', result, outDir, y_train, y_test, y_train_pred, y_test_pred)
Code example #5
    np.random.seed(seed)
    np.random.shuffle(y)  # shuffles the labels in place; X is left untouched

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

    if do_load:
        regressor = load_model(f'../models/kerasregressor_{X_col}.h5')
    else:
        regressor = KerasRegressor(build_fn=regression_model,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   verbose=1)
        h = regressor.fit(X_train, y_train)
        regressor.model.save(f'../models/kerasregressor_{X_col}.h5')

    _predictions = regressor.predict(X_test).round()
    predictions = list(zip(_predictions, y_test))
    prediction_df = pd.DataFrame(predictions, columns=['Predicted', 'Actual'])

    matrix = confusion_matrix(y_test, _predictions)
    sns.heatmap(matrix,
                cmap='coolwarm',
                linecolor='white',
                linewidths=1,
                xticklabels=CATEGORIES,
                yticklabels=CATEGORIES,
                annot=True,
                fmt='d',
                ax=ax)
    ax.set_title(f'{X_col}')
    ax.set_ylabel("True Label")
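A quick way to turn the plotted matrix into a single number; a minimal sketch, assuming matrix is the confusion_matrix array computed above:

# Overall accuracy: correct predictions (the diagonal) over all predictions
accuracy = matrix.trace() / matrix.sum()
print(f'{X_col}: accuracy = {accuracy:.3f}')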
Code example #6
    regressor = create_LSTM(neurons=clf.best_params_.get('neurons'),
                            dropoutRate=clf.best_params_.get('dropoutRate'),
                            constraints=clf.best_params_.get('constraints'))
elif recurrent_type == 'GRU':
    print('Creating GRU...')
    regressor = create_GRU(neurons=clf.best_params_.get('neurons'),
                           dropoutRate=clf.best_params_.get('dropoutRate'),
                           constraints=clf.best_params_.get('constraints'))
else:
    print('Unknown recurrent type; falling back to LSTM.')
    regressor = create_LSTM(neurons=clf.best_params_.get('neurons'),
                            dropoutRate=clf.best_params_.get('dropoutRate'),
                            constraints=clf.best_params_.get('constraints'))

regressor.fit(X_train, y_train, epochs=50, batch_size=8)
y_pred_scaled = regressor.predict(X_test)
sc_flow = MinMaxScaler(feature_range=(0, 1), copy=True)
sc_flow.fit(np.array(y_train_not_scaled).reshape(-1, 1))  # fit only; the transformed output is unused
y_pred = sc_flow.inverse_transform(y_pred_scaled)

# Evaluation
rootMSE(y_test_not_scaled, y_pred)

# =============================================================================
# New LSTM
# =============================================================================
'''
# Setting hyperparameters automatically from grid-searching results
best_neurons = clf.best_params_.get('neurons')
best_dropoutRate = clf.best_params_.get('dropoutRate')
constraints = clf.best_params_.get('constraints')
Code example #7
    model.add(Dense(100, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(output_size))
    model.compile(loss='mean_absolute_error', optimizer='sgd')
    return model
#model = LogisticRegression(penalty='l1', dual=True, verbose=3)
#model = SVR(kernel='poly', degree=5, max_iter=10, verbose=True)
#model = KNeighborsRegressor(n_neighbors=5)
model = KerasRegressor(build_fn=regressor, batch_size=32, epochs=200)
#model = MLPRegressor(hidden_layer_sizes=(86,100,100,10), n_iter_no_change=20, max_iter=300, verbose=True, tol=.00000001, activation='relu')
#model_pwd = pwd+"/"+func+"/"+"Models/"+str(num_samples)+"_"+str(low)+"_"+str(high)+"_"+str(n)+"_"+str(d)+"_"+str(num_updates)+"_"+str(intercept)+".h5"
#model.save(model_pwd)
model.fit(Xtrain, ytrain)
#loss = model.evaluate(Xtest, ytest)
#loss = model.score(Xtrain,ytrain)
#ypreds = model.predict(Xtest, verbose=1)
ypreds = model.predict(Xtest)
ypreds_pwd = pwd+"/"+func+"/"+"Predictions/"+str(num_samples)+"_"+str(low)+"_"+str(high)+"_"+str(n)+"_"+str(d)+"_"+str(num_updates)+"_"+str(intercept)+".csv"
ds_manager.write_dataset(ypreds, ypreds_pwd)

print("Trying to learn: " + func)
print("number samples: " + str(num_samples))
print("range: " + "["+str(low)+", "+str(high)+"]")
print("number of points in original ds: " + str(n))
print("dim: " + str(d))
print("number of updates:" + str(num_updates))
print("intercept: " + str(intercept))
#print("loss on test set: " + str(loss))
actual_loss = mean_absolute_error(ytest, ypreds)
print("actual?: " + str(actual_loss))
# quick thing, shouldn't it just tell from the beta? like that shouldn't be too hard right?
Code example #8
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
regressor = KerasRegressor(build_fn=model, batch_size=16, epochs=2000)

import tensorflow as tf
#print(tf)
# I used KerasRegressor here, saved the best weights, and trained for 2000 epochs; mean absolute error is the loss function.
# The input has 200 features after dropping the columns. For each input we predict four values: High, Low, Open, Close.
callback = tf.keras.callbacks.ModelCheckpoint(filepath='Regressor_model.h5',
                                              monitor='mean_absolute_error',
                                              verbose=0,
                                              save_best_only=True,
                                              save_weights_only=False,
                                              mode='auto')
results = regressor.fit(X_train, y_train, callbacks=[callback])

y_pred = regressor.predict(X_test)
print(y_pred)

print(y_test)

import numpy as np
y_pred_mod = []
y_test_mod = []

for i in range(0, 4):
    j = 0
    y_pred_temp = []
    y_test_temp = []

    # collect column i (one of High, Low, Open, Close) from predictions and targets
    while j < len(y_test):
        y_pred_temp.append(y_pred[j][i])
        y_test_temp.append(y_test[j][i])
        j += 1
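The per-column loop above amounts to splitting the arrays column-wise; a minimal NumPy sketch, assuming y_pred and y_test are arrays of shape (n, 4):

import numpy as np

# Row k of the transposed array is output column k (High, Low, Open, Close)
y_pred_mod = np.asarray(y_pred).T.tolist()
y_test_mod = np.asarray(y_test).T.tolist()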
Code example #9
print('Training time: {:.2f} mins.'.format((end_time - start_time) / 60.))

# Plot the losses vs epoch here
fig = plt.figure(figsize=(8, 5))
    
plot1, = plt.plot(history.epoch, history.history['loss'], c='blue', label='MAE')
plt.grid(which='both', linestyle='--')

ax = fig.gca()    
ax.set_xlabel(r'Epoch')
ax.set_ylabel(r'Loss')
plt.legend(bbox_to_anchor=(0.1, 0.0, 0.80, 1), bbox_transform=fig.transFigure, 
           loc='lower center', ncol=3, mode="expand", borderaxespad=0.)

plt.tight_layout()
plt.show()
plt.close(fig)

# Testing
with tf.device(device):
    y_pred = model.predict(X_test)
    loss = model.model.evaluate(X_test, y_test)
    
print('Test: Loss {:.4f}'.format(loss))

# Scoring: mean squared error over the test set
mse = ((y_test - y_pred) ** 2).sum() / y_test.shape[0]

# Reporting the number of parameters
num_params = model.model.count_params()
print('Number of parameters: {}'.format(num_params))
Code example #10
    model = Sequential()
    model.add(
        Dense(4, input_dim=4, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model (note: the 'accuracy' metric is not meaningful for regression)
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


# evaluate model
estimator = KerasRegressor(build_fn=baseline_model,
                           epochs=10,
                           batch_size=5,
                           verbose=0)
estimator.fit(X, y)
prediction = estimator.predict(X)
train_error = np.abs(y - prediction)
mean_error = np.mean(train_error)
min_error = np.min(train_error)
max_error = np.max(train_error)
std_error = np.std(train_error)
print(mean_error)
print(std_error)
'''
kfold = KFold(n_splits=10)
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Baseline: %.2f (%.2f) MSE" % (results.mean(), results.std()))
'''
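For reference, the commented-out evaluation above as a runnable sketch; it assumes the X and y arrays used earlier (the capital Y in the source appears to be a typo):

from sklearn.model_selection import KFold, cross_val_score

kfold = KFold(n_splits=10)
results = cross_val_score(estimator, X, y, cv=kfold)
print("Baseline: %.2f (%.2f) MSE" % (results.mean(), results.std()))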
Code example #11
    def createModel(self):
        clear_session()
        X, y = self.getData()
        print(self.scale_var.get())

        layers = self.no_optimization_choice_var.get()
        
        learning_rate = self.hyperparameters[4].get()
        momentum = self.hyperparameters[5].get()

        optimizers = {
            "Adam": Adam(learning_rate=learning_rate),
            "SGD": SGD(learning_rate=learning_rate, momentum=momentum),
            "RMSprop": RMSprop(learning_rate=learning_rate, momentum=momentum)
        }
        
        def base_model():
            model = Sequential()

            for i in range(layers):
                neuron_number = self.neuron_numbers_var[i].get()
                activation = self.activation_var[i].get()
                if i == 0:
                    model.add(Dense(neuron_number, activation=activation, input_dim=X.shape[1], kernel_initializer=GlorotUniform(seed=0)))
                else:
                    model.add(Dense(neuron_number, activation=activation, kernel_initializer=GlorotUniform(seed=0)))

            model.add(Dense(1, activation=self.output_activation.get(), kernel_initializer=GlorotUniform(seed=0)))
            model.compile(optimizer=optimizers[self.hyperparameters[2].get()], loss=self.hyperparameters[3].get())
            return model

        do_forecast = self.do_forecast_option.get()
        val_option = self.validation_option.get()

        # val_option: 0 = train on all data, 1 = random train/test split,
        # 2 = k-fold cross-validation, 3 = leave-one-out-style cross-validation
        if val_option == 0 or val_option == 1:
            model = base_model()
        elif val_option == 2 or val_option == 3:
            model = KerasRegressor(build_fn=base_model, epochs=self.hyperparameters[0].get(), batch_size=self.hyperparameters[1].get())

        if val_option == 0:
            model.fit(X, y, epochs=self.hyperparameters[0].get(), batch_size=self.hyperparameters[1].get())
            if do_forecast == 0:
                pred = model.predict(X).reshape(-1)
                losses = loss(y, pred)[:-1]
                self.y_test = y
                self.pred = pred
                for i, j in enumerate(losses):
                    self.test_metrics_vars[i].set(j)
            self.model = model

        elif val_option == 1:
            if do_forecast == 0:
                X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=self.random_percent_var.get()/100)
                model.fit(X_train, y_train, epochs=self.hyperparameters[0].get(), batch_size=self.hyperparameters[1].get())
                pred = model.predict(X_test).reshape(-1)
                losses = loss(y_test, pred)[:-1]
                self.y_test = y_test.reshape(-1)
                self.pred = pred
                for i, j in enumerate(losses):
                    self.test_metrics_vars[i].set(j)
            else:
                size = int((self.random_percent_var.get()/100)*len(X))
                X = X[-size:]
                y = y[-size:]
                model.fit(X, y, epochs=self.hyperparameters[0].get(), batch_size=self.hyperparameters[1].get())

            self.model = model

        elif val_option == 2:
            if do_forecast == 0:
                cvs = cross_validate(model, X, y, cv=self.cross_val_var.get(), scoring=skloss)
                for i, j in enumerate(list(cvs.values())[2:]):
                    self.test_metrics_vars[i].set(j.mean())
        
        elif val_option == 3:
            if do_forecast == 0:
                cvs = cross_validate(model, X, y, cv=X.shape[0]-1, scoring=skloss)
                for i, j in enumerate(list(cvs.values())[2:]):
                    self.test_metrics_vars[i].set(j.mean())

        self.model.summary()  # note: self.model is only assigned when val_option is 0 or 1