Example #1
import numpy as np
from sklearn.linear_model import LinearRegression, Ridge


def createPredictionModel(trainX, trainY, method='LR'):
    print('Creating base prediction model')

    if method == 'LR':
        model = LinearRegression(fit_intercept=True)

    elif method == 'Ridge':
        model = Ridge(alpha=5)

    elif method == 'keras':
        from keras.models import Sequential
        from keras.layers import Dense, Dropout
        model = Sequential()
        model.add(Dense(units=3, activation='relu', input_dim=trainX.shape[1]))
        model.add(Dropout(0.01))
        model.add(Dense(units=1))
        model.compile(loss='mse', optimizer='adam')


    else:
        raise ValueError('Unknown model {!r}; implement it yourself?'.format(method))

    # fits both the scikit-learn and the Keras models; for Keras you can also
    # pass e.g. epochs=50, verbose=2 here
    model.fit(trainX, trainY)

    res = np.ravel(model.predict(trainX)) - np.ravel(trainY)  # residuals; Keras predicts shape (n, 1)
    stdevRes = res.std()  # calculate the standard deviation of the residuals

    return model, res, stdevRes
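A minimal usage sketch for the function above (toy data; the array names are illustrative):

import numpy as np

X = np.random.rand(100, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(100)

model, res, stdevRes = createPredictionModel(X, y, method='Ridge')
print('stdev of residuals:', stdevRes)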
Example #2
# Note: despite its name, this Streamlit helper builds *regressors*; it reads
# X_train and y_train from the enclosing scope.
def get_classifier(name_of_reg, params):
    reg = None

    if name_of_reg == 'MultiLinear':
        reg = LinearRegression()
        reg.fit(X_train, y_train)
    elif name_of_reg == 'Support Vector':
        reg = SVR(kernel=params['kernel'])
        reg.fit(X_train, y_train)
    elif name_of_reg == 'DecisionTree':
        reg = DecisionTreeRegressor(random_state=0)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'RandomForest':
        reg = RandomForestRegressor(
            n_estimators=params['n_estimators'],
            random_state=0,
            min_samples_leaf=.0001)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'ExtraTree':
        reg = ExtraTreesRegressor(
            n_estimators=params['n_estimators'])
        reg.fit(X_train, y_train)
    elif name_of_reg == 'GradientBoosting':
        reg = GradientBoostingRegressor(random_state=0)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'XGBoost':
        reg = XGBRegressor()
        reg.fit(X_train, y_train)
    elif name_of_reg == 'Artificial Neural Network':
        reg = tf.keras.models.Sequential()
        reg.add(tf.keras.layers.Dense(units=60, activation='relu'))
        reg.add(tf.keras.layers.Dense(units=60, activation='relu'))
        reg.add(tf.keras.layers.Dense(units=1))
        reg.compile(optimizer='adam', loss='mean_squared_error')
        reg.fit(X_train,
                y_train,
                batch_size=32,
                epochs=params['epochs'])
    else:
        st.warning('Select your choice of algorithm')

    return reg
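A hedged sketch of how this helper might be wired into a Streamlit app (the widget labels and parameter values are assumptions):

name_of_reg = st.sidebar.selectbox(
    'Algorithm',
    ('MultiLinear', 'Support Vector', 'RandomForest', 'XGBoost'))
params = {'kernel': 'rbf', 'n_estimators': 100, 'epochs': 50}
reg = get_classifier(name_of_reg, params)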
Example #3
def fitted_q_iteration(self,
                       four_tuples_set,
                       stop,
                       algo="Linear Regression"):
    N = 0
    print("Computing fitted q iteration ...")
    print("{} / 100".format((N / stop) * 100))
    while N < stop:  # temporary stopping condition
        N += 1
        X = []
        y = []
        for t in four_tuples_set:  # t = (state, action, reward, next_state)
            X.append([t[0][0], t[0][1], t[1]])
            if N == 1:
                y.append(t[2])
            else:
                # Bellman target: r + gamma * max over the two actions (-4, +4)
                q_prev = self.qn_approximation[-1]
                q_left = q_prev.predict(
                    np.array([t[3][0], t[3][1], -4]).reshape(1, -1))[0]
                q_right = q_prev.predict(
                    np.array([t[3][0], t[3][1], 4]).reshape(1, -1))[0]
                y.append(t[2] + self.domain.d_factor * max(q_left, q_right))
        if algo == "Linear Regression":
            model = LinearRegression()
        elif algo == "Extremely Randomized Trees":
            model = ExtraTreesRegressor(n_estimators=10)
        elif algo == "Neural Network":
            X = np.array(X)
            y = np.array(y)
            model = Sequential()
            model.add(Dense(8, input_dim=3, activation='relu'))
            model.add(Dense(4, activation='relu'))
            model.add(Dense(1, activation='linear'))
            model.compile(loss='mean_squared_error',
                          optimizer='adam',
                          metrics=['mse'])
            model.fit(X, y, epochs=100, batch_size=128, verbose=2)
            self.qn_approximation.append(model)
        if algo != "Neural Network":
            self.qn_approximation.append(
                model.fit(np.array(X), np.array(y)))
        print("{} / 100".format((N / stop) * 100))
    return self.qn_approximation[-1]
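For reference, the targets built above are the standard fitted-Q-iteration update, with self.domain.d_factor playing the role of the discount factor gamma and {-4, +4} as the action set:

# y_i = r_i + gamma * max(Q_{N-1}(s'_i, -4), Q_{N-1}(s'_i, +4))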
Example #4
print("Test Set shape - ", X_test.shape)


# In[ ]:


model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))
#model.add(Dense(1024, activation='relu'))
#model.add(Dense(512, activation='relu'))
#model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1)) # Output
opt = keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt,loss='mean_squared_error')
estp = EarlyStopping(monitor='val_loss', min_delta=0, patience=5,
                     verbose=1, mode='auto', restore_best_weights=True)
model.fit(X_train, y_train, validation_split=0.15, shuffle=True,
          verbose=2, epochs=200, callbacks=[estp])


# In[ ]:


model.summary()


# In[ ]:


pred9 = model.predict(X_test)
mse9 = mean_squared_error(y_test, pred9)
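To report the result in the same units as the target, RMSE is handy (a small addition):

print('MSE : %.4f' % mse9)
print('RMSE: %.4f' % (mse9 ** 0.5))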
Example #5
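This snippet assumes xs, ys and a scikit-learn model defined earlier in the script; a minimal sketch of that context (the sample points are assumptions, but the relation y = 2x - 1 is implied by the expected values below):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error

xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0])
ys = 2 * xs - 1
model = LinearRegression()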
X = np.reshape(xs, (-1, 1))
y = np.reshape(ys, (-1, 1))

X_test = np.array([10, 20, -2])
y_test = np.array([19.0, 39, -5])

model.fit(X, y)

prediction = model.predict(np.reshape(X_test, (-1, 1)))

print("MAE:", mean_absolute_error(y_test, prediction))
print("Expected: [[-5.]]", "Got:", model.predict(np.reshape([-2], (-1, 1))))

# Tensorflow
print("\n##### USING TENSORFLOW")

model = Sequential()
model.add(Dense(units=1, input_shape=[1]))

# SGD: Stochastic Gradient Descent

model.compile(optimizer="sgd", loss="mean_squared_error")

model.fit(
    xs,
    ys,
    epochs=500,
)
print("Expected: [[19.]]", "Got:", model.predict(np.array([10.0])))  # newer Keras requires an array here, not a plain list
Example #6
# (the snippet begins mid-script; 'regressor' is assumed to be a fresh
# Sequential model, and the Keras 1 arguments are updated to the Keras 2
# names: units / kernel_initializer / epochs)
regressor = Sequential()

# Adding the input layer and the first hidden layer
regressor.add(
    Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=12))

# Adding the second hidden layer
regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
# Adding the third hidden layer
regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))

# Adding the output layer
regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))

# Compiling the ANN
regressor.compile(optimizer='adam',
                  loss='mean_absolute_error',
                  metrics=['mean_absolute_error'])

# Fitting the ANN to the Training set
regressor.fit(X_train, y_train, batch_size=10, epochs=100)

# Predicting the Test set results
y_pred = regressor.predict(X_test)
y_pred = y_pred.reshape(-1)  # reshape returns a new array, so reassign it
y_pred.shape

#Calculating Score
import numpy as np
# import math
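The score calculation itself was cut off; a sketch consistent with the MAE loss used above:

mae = np.mean(np.abs(y_pred - y_test))
print('MAE:', mae)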
Example #7
# use neural network
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras import backend
from matplotlib import pyplot

def R2(y_true, y_pred):
    # 1 - SS_res / SS_tot, averaged over the batch
    ss_res = backend.mean(backend.square(y_pred - y_true), axis=-1)
    ss_tot = backend.mean(backend.square(y_true - backend.mean(y_true)), axis=-1)
    return 1 - ss_res / ss_tot

n_cols = X.shape[1]  # X is the full feature matrix; the train/test splits are used below
input_shape = (n_cols,)
# Specify the model
model = Sequential()
model.add(Dense(350, activation='relu', input_shape = input_shape))
model.add(Dense(200, activation='relu'))
model.add(Dense(150, activation='relu'))
model.add(Dense(1))
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error',
              metrics=['mse', 'mae', 'mape', R2])
# train model
history = model.fit(train_features, train_labels,
                    validation_data=(test_features, test_labels),
                    epochs=1000, batch_size=1200)
# plot metrics (these history keys match older Keras; newer versions use
# abbreviated names such as 'val_mse')
pyplot.plot(history.history['val_mean_squared_error'])
pyplot.plot(history.history['val_mean_absolute_error'])
pyplot.plot(history.history['val_mean_absolute_percentage_error'])
pyplot.plot(history.history['val_R2'])
pyplot.show()
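To make the plot readable, a legend can be added before pyplot.show() (optional):

pyplot.legend(['val MSE', 'val MAE', 'val MAPE', 'val R2'])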

Example #8
regressor = Sequential()

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu', input_dim=1))
regressor.add(Dropout(.2))

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu'))
regressor.add(Dropout(.2))

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu'))
regressor.add(Dropout(.2))

# Note: a sigmoid output constrains predictions to (0, 1), so this only makes
# sense if the target prices were scaled to that range beforehand.
regressor.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))

regressor.compile(optimizer='adam', loss='mean_squared_error')

regressor.fit(X_train, y_train, batch_size=32, epochs=200)



regressor.save('1.h5')

del regressor

regressor = load_model('1.h5')  # requires: from keras.models import load_model

real_stock_price = np.array(X_test)
inputs = real_stock_price
predicted_stock_price = regressor.predict(inputs)
Example #9
def modelling(X_train,
              y_train,
              X_test,
              y_test,
              fs='lasso',
              method='ols',
              select=500):

    if method == 'ols':
        from sklearn.linear_model import LinearRegression
        mod = LinearRegression().fit(X_train, y_train)

    if method == 'elasticNet':
        from sklearn.linear_model import ElasticNetCV
        mod = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],
                           cv=10,
                           tol=0.001,
                           n_jobs=7)
        mod.fit(X_train, y_train)

    if method == 'xgboost':
        import xgboost as xg
        max_depth = 3
        min_child_weight = 10
        subsample = 0.5
        colsample_bytree = 0.6
        objective = 'reg:squarederror'  # 'reg:linear' was deprecated and later removed in xgboost
        num_estimators = 1000
        learning_rate = 0.3

        mod = xg.XGBRegressor(max_depth=max_depth,
                              min_child_weight=min_child_weight,
                              subsample=subsample,
                              colsample_bytree=colsample_bytree,
                              objective=objective,
                              n_estimators=num_estimators,
                              learning_rate=learning_rate)
        mod.fit(X_train, y_train)

        # implement CV

    if method == 'nn':
        from sklearn.preprocessing import StandardScaler
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.callbacks import EarlyStopping
        from keras.callbacks import ModelCheckpoint
        from keras.models import load_model

        mod = Sequential()
        # input layer
        mod.add(Dense(50, activation='relu', input_shape=(int(select), )))
        # hidden layer
        mod.add(Dense(50, activation='relu'))
        # output layer
        mod.add(Dense(1, activation='linear'))
        # mod.summary()
        mod.compile(loss='mse', optimizer='adam', metrics=['mae'])  # 'accuracy' is not meaningful for regression

        # patient early stopping and select best model (not always the last)
        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=1,
                           patience=200)
        mc = ModelCheckpoint('best_model.h5',
                             monitor='val_loss',
                             mode='min',
                             verbose=1,
                             save_best_only=True)

        history = mod.fit(X_train,
                          y_train,
                          epochs=1000,
                          batch_size=25,
                          verbose=1,
                          validation_data=(X_test, y_test),
                          callbacks=[es, mc])

    # pickle.dump(mod, open('models/' + fs + '_' + method + '_' +
    #                        select + '.sav', 'wb'))

    if method == 'nn':
        rmse = (sum((np.concatenate(mod.predict(X_test)) - y_test)**2) /
                y_test.size)**(.5)
    else:
        rmse = (sum((mod.predict(X_test) - y_test)**2) / y_test.size)**(.5)

    return mod, rmse
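A hedged usage sketch on synthetic data (for the 'nn' branch, select must equal the number of features):

import numpy as np

X = np.random.rand(200, 500)
y = 3 * X[:, 0] + 0.1 * np.random.randn(200)
mod, rmse = modelling(X[:150], y[:150], X[150:], y[150:],
                      method='ols', select=500)
print('RMSE:', rmse)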
Example #10
# ANN
import keras
from keras.models import Sequential
from keras.layers import Dense

classifier = Sequential()
classifier.add(
    Dense(units=9,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=19))
classifier.add(Dense(units=4, kernel_initializer='uniform', activation='relu'))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))  # sigmoid, not relu, for a binary output
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
classifier.fit(X, y, batch_size=10, epochs=200)

#Confusion_Matrix
# y_pred is not computed in this snippet; a plausible reconstruction:
y_pred = (classifier.predict(X_test) > 0.5).astype(int)
cm = confusion_matrix(y_test, y_pred)
print(cm)

pid = test['PassengerId']
test = test.iloc[:, 1:].drop(['Name', 'Ticket', 'Cabin'], axis=1)
test['Pclass'] = test['Pclass'] / 3
test['Sex'] = test['Sex'].replace('female', 0).replace('male', 1)

test['Age'] = test['Age'].fillna(round(test['Age'].mean()))
test['Age'] = test['Age'] / max(test['Age'])
# When SibSp is not any of the values 0-5, set it to 8
Example #11
train_X, test_X, train_label, test_label = train_test_split(X,
                                                            Y,
                                                            test_size=0.1,
                                                            shuffle=False)
train_X = np.array(train_X)
test_X = np.array(test_X)
train_label = np.array(train_label)
test_label = np.array(test_label)
model = Sequential()
#add model layers

model.add(LSTM(128, return_sequences=True))  # assumes train_X is 3-D: (samples, timesteps, features)
model.add(LSTM(64, return_sequences=False))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='RMSprop', loss='mse')
model.fit(train_X,
          train_label,
          validation_data=(test_X, test_label),
          epochs=50,
          shuffle=False)
print(model.evaluate(test_X, test_label))
# model.summary()
predicted = model.predict(test_X)
test_label[:, 0] = y_scaler.inverse_transform(
    test_label[:, 0].reshape(-1, 1)).ravel()  # inverse_transform expects a 2-D array
predicted = np.array(predicted[:, 0]).reshape(-1, 1)
predicted = y_scaler.inverse_transform(predicted)
plt.plot(test_label[:, 0], color='black', label=' Stock Price')
plt.plot(predicted, color='green', label='Predicted  Stock Price')
plt.title(' Stock Price Prediction')
plt.xlabel('Time')
Example #12
test_X = test_X.reshape(test_X.shape[0],7,50,1)
model = Sequential()
#add model layers
# input_shape belongs on the TimeDistributed wrapper; inside Conv1D it is ignored
model.add(TimeDistributed(Conv1D(128, kernel_size=1, activation='relu'),
                          input_shape=(None, 50, 1)))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(256, kernel_size=1, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(512, kernel_size=1, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Flatten()))
model.add(Bidirectional(LSTM(200,return_sequences=True)))
model.add(Dropout(0.25))
model.add(Bidirectional(LSTM(200,return_sequences=False)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mse')
model.fit(train_X, train_label, validation_data=(test_X,test_label), epochs=200)
print(model.summary())
print(model.evaluate(test_X,test_label))
# model.summary()
# predicted  = model.predict(test_X)
# test_label = (test_label[:,0])
# predicted = np.array(predicted[:,0]).reshape(-1,1)
# for j in range(len_t , len_t + len(test_X)):
#     temp =stock.iloc[j,4]
#     test_label[j - len_t] = test_label[j - len_t] * temp + temp
#     predicted[j - len_t] = predicted[j - len_t] * temp + temp
# plt.plot(test_label, color = 'black', label = ' Stock Price')
# plt.plot(predicted, color = 'green', label = 'Predicted  Stock Price')
# plt.title(' Stock Price Prediction')
# plt.xlabel('Time')
Example #13
# Regression
total_train = np.c_[image_train, desc_train, title_train]
model = LinearRegression()  # the 'normalize' argument was removed in scikit-learn 1.2; scale the features beforehand if needed
model.fit(total_train, y_train)
total_test = np.c_[image_outcome, desc_outcome, title_outcome]
total_outcome = model.predict(total_test)
total_outcome = (total_outcome >= 0.5).astype(int)  # threshold the real-valued predictions
print("Linear Regression:",
      sklearn.metrics.f1_score(y_test, total_outcome, average='micro') * 100)

# Neural Network
input_im = Input(shape=(122,))
input_de = Input(shape=(122,))
input_ti = Input(shape=(122,))

combined = keras.layers.concatenate([input_im, input_de, input_ti])
z = Dense(330, activation = 'sigmoid')(combined)
z = Dense(270, activation = 'tanh')(z)
z = Dense(122, activation = 'sigmoid')(z)

model = Model(inputs=[input_im, input_de, input_ti], outputs=z)
model.summary()

# NOTE: with a sigmoid output layer, binary_crossentropy is the usual pairing;
# categorical_crossentropy assumes softmax/one-hot outputs.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit([image_train, desc_train, title_train], y_train, epochs=20, batch_size=256)

y_out = model.predict([image_outcome, desc_outcome, title_outcome])
y_out = (y_out > 0.5).astype(int)  # threshold the sigmoid outputs
print("NN:", f1_score(y_test, y_out, average='micro'))
Example #14
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import optimizers

sgd = optimizers.SGD(learning_rate=0.1, decay=1e-6, momentum=0.9, nesterov=True)  # defined but unused: compile() below uses 'adam'

model = Sequential()
model.add(Dense(units=3, activation='relu', input_shape=(3, )))

model.add(Dense(units=300, activation='relu'))
model.add(Dense(units=300, activation='relu'))

model.add(Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=32, epochs=1000)

y_pred = model.predict(X_test)
y_pred_D = sc_y.inverse_transform(y_pred)

score = model.evaluate(X_test, Y_test)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

from sklearn.model_selection import cross_val_score
# NOTE: cross_val_score needs a scikit-learn-compatible estimator; wrap the
# Keras model (e.g. with scikeras.wrappers.KerasRegressor) before passing it.
test = cross_val_score(estimator=model,
                       X=X_train,
                       y=Y_train,
                       cv=10)  # the original snippet was truncated here; cv value assumed
Example #15
def model_predictions(modeloption, x_train, y_train, x_test, y_test):

    if modeloption == 'lineareg':
        model = LinearRegression()
        model.fit(x_train, y_train)  # Training
        y_predicted = model.predict(x_test)  # Test

    elif modeloption == 'ridge':
        model = RidgeCV(alphas=ALPHAS)
        model.fit(x_train, y_train)
        print(model.alpha_)
        y_predicted = model.predict(x_test)

    elif modeloption == 'mlp':
        # Build Keras model
        model = Sequential()
        """
        #model.add(keras.layers.Dropout(0.2, input_shape=(x_train.shape[1],)))
        model.add(Dense(NEURONSPERLAYER, input_shape =(x_train.shape[1],)))
        model.add(Activation('relu'))
        #model.add(keras.layers.Dropout(0.2))
        #model.add(Dense(NEURONSPERLAYER))
        #model.add(Activation('relu'))
        #model.add(keras.layers.Dropout(0.2))
        model.add(Dense(NEURONSOUTPUT))
        model.add(Activation('linear'))
        """
        #trial11
        #model.add(keras.layers.Dropout(0.3, input_shape=(x_train.shape[1],)))
        model.add(
            Dense(NEURONSPERLAYER,
                  activation='sigmoid',
                  input_shape=(x_train.shape[1], )))
        model.add(Dense(1000, activation=None))
        model.compile(loss='mean_squared_error', optimizer=OPTIMIZER)

        history = model.fit(x_train,
                            y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH,
                            verbose=0)
        y_predicted = model.predict(x_test,
                                    batch_size=BATCH,
                                    verbose=0,
                                    steps=None)

        # show training loss and test loss
        print(history.history['loss'])
        print(model.evaluate(x_test, y_test, batch_size=BATCH, verbose=0))

    elif modeloption == 'knn':
        model = KNeighborsRegressor(n_neighbors=NEIGHBORS, weights='distance')
        model.fit(x_train, y_train)
        y_predicted = model.predict(x_test)

    elif modeloption == 'kernelreg':
        model = KernelRidge(kernel=KERNEL,
                            degree=DEGREE,
                            alpha=0.001,
                            coef0=10)
        model.fit(x_train, y_train)
        y_predicted = model.predict(x_test)

    else:
        raise ValueError('Unknown modeloption: {!r}'.format(modeloption))

    return y_predicted
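A hedged usage sketch (ALPHAS, NEURONSPERLAYER, OPTIMIZER and the other upper-case names are module-level constants assumed by the function):

ALPHAS = [0.01, 0.1, 1.0, 10.0]
y_hat = model_predictions('ridge', x_train, y_train, x_test, y_test)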
Example #16
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

model = Sequential()

model.add(Dense(units=10, input_dim=1))
model.add(Activation('tanh'))
model.add(Dense(units=1))
model.add(Activation('tanh'))

sgd = SGD(learning_rate=0.3)  # 'lr' was renamed to 'learning_rate' in newer Keras

model.compile(optimizer=sgd, loss='mse')

# a few quick full-batch training steps; print every step (the old
# 'step % 6' guard only ever fired at step 0)
for step in range(3):
    cost = model.train_on_batch(user1.month, user1.grade)
    print('cost: ', cost)

W, b = model.layers[0].get_weights()
print('W:', W, ' b: ', b)
print(len(model.layers))

y_pred = model.predict(user1.month)

plt.scatter(user1.month, user1.grade)
plt.plot(user1.month, y_pred, 'r-', lw=2, label="prediction")  # plot the model's predictions, not the raw data again
plt.legend(loc=1)
Example #17
# implement neural network
model = Sequential([
    Dense(9, input_shape=(9, ), activation='relu'),
    Dense(20, activation='relu'),
    Dense(1, activation='linear'),
])

model.summary(print_fn=lambda line: logger.info(line))

# compile the model
learning_rate = 0.001

logger.info('Compile the model with Learning rate = %f', learning_rate)
model.compile(Adam(learning_rate=learning_rate),
              loss='mean_squared_error',
              metrics=['mse', 'mae'])

# train the model
history = model.fit(x_train,
                    y_train,
                    validation_split=0.2,
                    batch_size=32,
                    epochs=6000,
                    verbose=2)

logger.info(history.history.keys())
# "Loss"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
Example #18
# 'shallow_net_model' and 'shallow_net_stddev' are project helpers not shown
# here; 'valid_methods' was also defined elsewhere and is reconstructed from
# the branches below:
valid_methods = ("linear", "shallow nn", "gradient boost")


def r2_regression(variables,
                  targets,
                  num_ensemble=10,
                  method="linear",
                  nn_epochs=30,
                  gb_depth=3,
                  gb_n_estimators=100,
                  gb_alpha=0.4,
                  gb_lambda=0.4):
    assert method in valid_methods
    r_squared_val = []
    r_squared_train = []

    num_variables = variables.shape[-1]
    iou_predictions = []

    print("Aggregating {} regression over {} ensemble members...".format(
        method, num_ensemble))
    for i in tqdm.tqdm(range(num_ensemble)):
        np.random.seed(i)
        val_mask = np.random.rand(len(targets)) < 0.2
        train_mask = np.logical_not(val_mask)
        variables_val = variables[val_mask, :]
        variables_train = variables[train_mask, :]
        targets_val, targets_train = targets[val_mask], targets[train_mask]

        if method == "linear":
            model = LinearRegression().fit(variables_train, targets_train)
        elif method == "shallow nn":
            model = shallow_net_model(num_variables)
            model.compile(loss='mean_squared_error',
                          optimizer='adam',
                          metrics=[shallow_net_stddev])
            model.fit(variables_train,
                      targets_train,
                      epochs=nn_epochs,
                      batch_size=128,
                      verbose=0)
        elif method == "gradient boost":
            model = XGBRegressor(verbosity=0,
                                 max_depth=gb_depth,
                                 colsample_bytree=0.5,
                                 n_estimators=gb_n_estimators,
                                 reg_alpha=gb_alpha,
                                 reg_lambda=gb_lambda).fit(
                                     variables_train, targets_train)

        prediction = np.clip(model.predict(variables), 0, 1)
        if i == 0:
            iou_predictions.append(targets[val_mask])
            iou_predictions.append(prediction[val_mask])
        r_squared_val.append(r2_score(targets_val, prediction[val_mask]))
        r_squared_train.append(r2_score(targets_train, prediction[train_mask]))

    iou_predictions = np.array(iou_predictions)
    r_squared_val, r_squared_train = np.array(r_squared_val), np.array(
        r_squared_train)
    frame = pd.DataFrame({
        "mean R^2 val": [np.mean(r_squared_val)],
        "std R^2 val": [np.std(r_squared_val)],
        "mean R^2 train": [np.mean(r_squared_train)],
        "std R^2 train": [np.std(r_squared_train)]
    })

    return iou_predictions, frame
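A hedged usage sketch on synthetic data (shapes are arbitrary; targets must lie in [0, 1] to match the clipping above):

import numpy as np

variables = np.random.rand(500, 4)
targets = np.clip(variables @ np.array([0.3, 0.2, 0.1, 0.4]), 0, 1)
iou_preds, frame = r2_regression(variables, targets,
                                 num_ensemble=3, method="linear")
print(frame)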
Example #19
plt.title("Simple Linear Regression Model")
plt.plot(x, y, '.', label="Origin")
plt.plot(x, pred_y, '.', label="Model")
plt.legend()
plt.xlabel("Key")
plt.ylabel("Pred_Pos = CDF(Key)")
plt.show()


model = Sequential()
model.add(Dense(8, input_dim=1, activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(1))

sgd = keras.optimizers.SGD(learning_rate=0.0001)  # learning rate: a tunable parameter
model.compile(loss="mse", optimizer=sgd, metrics=["mse"])
model.fit(norm_x, y, epochs=1000, batch_size=32, verbose=0)  # norm_x: training data, y: training targets
pred_y = model.predict(norm_x)

plt.title("Neural Network 8x8 Model")
plt.plot(x, y, '.', label="Origin")
plt.plot(x, pred_y, '.', label="Model")
plt.legend()
plt.xlabel("Key")
plt.ylabel("Pred_Pos = CDF(Key)")
plt.show()


model = Sequential()
model.add(Dense(16, input_dim=1, activation="relu"))
model.add(Dense(16, activation="relu"))
model.add(Dense(1))  # the original snippet was cut off here; completed by analogy with the 8x8 model above
Example #20
# The Input Layer (the start of this snippet was lost; reconstructed to match
# the hidden/output layers below):
model = Sequential()
model.add(Dense(128, kernel_initializer='normal',
                input_dim=X_train.shape[1], activation='relu'))

# The Hidden Layers :
'''
NN_model.add(Dense(2048, kernel_initializer='normal',activation='relu'))
NN_model.add(Dense(1024, kernel_initializer='normal',activation='relu'))
'''
model.add(Dense(512, kernel_initializer='normal', activation='relu'))
model.add(Dense(256, kernel_initializer='normal', activation='relu'))

# The Output Layer :
model.add(Dense(1, kernel_initializer='normal', activation='linear'))

# Compile the network :
model.compile(loss='mean_absolute_error',
              optimizer='adam',
              metrics=['mean_absolute_error'])
#NN_model.compile(optimizer='adam', loss='mean_absolute_error', metrics=['accuracy'])

model.summary()

checkpoint_name = 'Weights-{epoch:03d}--{val_loss:.5f}.hdf5'
checkpoint = ModelCheckpoint(checkpoint_name,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='auto')
callbacks_list = [checkpoint]

model.fit(X_train,
          y_train,
          epochs=500,            # the original snippet was truncated at this
          batch_size=32,         # call; these values are assumed
          validation_split=0.2,  # needed so 'val_loss' exists for the checkpoint
          callbacks=callbacks_list)
Example #21
x_train, y_train = [], []  # assumed; the snippet begins mid-script
for i in range(60, len(train)):
    x_train.append(scaled_data[i-60:i, 0])
    y_train.append(scaled_data[i, 0])
x_train, y_train = np.array(x_train), np.array(y_train)

x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))

#check for best units
myRMS = []
for p in range(40, 60):
    model = Sequential()
    model.add(LSTM(units=p, return_sequences=True, input_shape=(x_train.shape[1],1)))
    model.add(LSTM(units=p))
    model.add(Dense(1))
    
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=2)
    
    #predicting values, using past 60 from the train data
    inputs = new_data[len(new_data) - len(valid) - 60:].values
    inputs = inputs.reshape(-1,1)
    inputs  = scaler.transform(inputs)
    
    X_test = []
    for i in range(60,inputs.shape[0]):
        X_test.append(inputs[i-60:i,0])
    X_test = np.array(X_test)
    
    X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
    closing_price = model.predict(X_test)
    closing_price = scaler.inverse_transform(closing_price)

    # record the error for this unit count (the original loop never filled
    # myRMS; 'valid' is assumed to hold the true prices for the test window)
    myRMS.append(np.sqrt(np.mean((np.asarray(valid).reshape(-1, 1) - closing_price) ** 2)))
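With myRMS filled, the best unit count from the 40-59 sweep can be read off (a sketch):

best_units = 40 + int(np.argmin(myRMS))
print('best LSTM units:', best_units)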
Example #22
    # (inside the message-passing loop; the loop header was cut from this snippet)
    atom_state, bond_state = message_block(atom_state, bond_state,
                                           connectivity)

atom_state = Dense(atom_features // 2, activation='softplus')(atom_state)
atom_state = Dense(1)(atom_state)
atom_state = Add()([atom_state, atomwise_energy])

output = ReduceAtomToMol(reducer='sum')([atom_state, snode_graph_indices])

model = GraphModel(
    [node_graph_indices, atom_types, distance_rbf, connectivity], [output])

lr = 5E-4
epochs = 500

model.compile(optimizer=keras.optimizers.Adam(learning_rate=lr), loss='mae')
model.summary()

model_name = 'schnet_edgeupdate_fixed'

if not os.path.exists(model_name):
    os.makedirs(model_name)

filepath = model_name + "/best_model.hdf5"
checkpoint = ModelCheckpoint(filepath,
                             save_best_only=True,
                             period=10,  # 'period' is deprecated in newer Keras (use save_freq)
                             verbose=1)
csv_logger = CSVLogger(model_name + '/log.csv')
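The callbacks are presumably handed to a fit call later in the original script; a hedged sketch (the training and validation arrays are assumptions):

model.fit(train_inputs, train_targets,
          validation_data=(valid_inputs, valid_targets),
          epochs=epochs,
          callbacks=[checkpoint, csv_logger])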