Example 1
from sklearn.linear_model import LinearRegression, Ridge


def createPredictionModel(trainX, trainY, method='LR'):
    print('Creating base prediction model')

    if method == 'LR':
        model = LinearRegression(fit_intercept=True)

    elif method == 'Ridge':
        model = Ridge(alpha=5)  # regularization strength

    elif method == 'keras':
        from keras.models import Sequential
        from keras.layers import Dense, Dropout
        model = Sequential()
        model.add(Dense(units=3, activation='relu', input_dim=trainX.shape[1]))
        model.add(Dropout(0.01))
        model.add(Dense(units=1))
        model.compile(loss='mse', optimizer='adam')


    else:
        print('Unknown model! Implement yourself?')
        raise ValueError()

    model.fit(trainX, trainY)  # for the Keras model also pass epochs=50, verbose=2; do not write "model = model.fit(...)" with Keras, since fit returns a History object

    res = model.predict(trainX) - trainY  # calculate residuals
    stdevRes = res.std()  # calculate the standard deviation of the residuals

    return model, res, stdevRes
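
A minimal usage sketch (not from the original source; the synthetic data and shapes are assumptions for illustration):

import numpy as np

trainX = np.random.rand(100, 3)
trainY = trainX @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(100)

# fit the linear-regression variant and inspect the residual spread
model, res, stdevRes = createPredictionModel(trainX, trainY, method='LR')
print('residual std:', stdevRes)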
Example 2
# Note: despite the name, this helper builds *regressors*; it also relies on
# module-level X_train / y_train and on Streamlit (st) being available.
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import (RandomForestRegressor, ExtraTreesRegressor,
                              GradientBoostingRegressor)
from xgboost import XGBRegressor
import tensorflow as tf
import streamlit as st


def get_classifier(name_of_reg, params):
    reg = None

    if name_of_reg == 'MultiLinear':
        reg = LinearRegression()
        reg.fit(X_train, y_train)
    elif name_of_reg == 'Support Vector':
        reg = SVR(kernel=params['kernel'])
        reg.fit(X_train, y_train)
    elif name_of_reg == 'DecisionTree':
        reg = DecisionTreeRegressor(random_state=0)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'RandomForest':
        reg = RandomForestRegressor(
            n_estimators=params['n_estimators'],
            random_state=0,
            min_samples_leaf=.0001)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'ExtraTree':
        reg = ExtraTreesRegressor(
            n_estimators=params['n_estimators'])
        reg.fit(X_train, y_train)
    elif name_of_reg == 'GradientBoosting':
        reg = GradientBoostingRegressor(random_state=0)
        reg.fit(X_train, y_train)
    elif name_of_reg == 'XGBoost':
        reg = XGBRegressor()
        reg.fit(X_train, y_train)
    elif name_of_reg == 'Artificial Neural Network':
        reg = tf.keras.models.Sequential()
        reg.add(tf.keras.layers.Dense(units=60, activation='relu'))
        reg.add(tf.keras.layers.Dense(units=60, activation='relu'))
        reg.add(tf.keras.layers.Dense(units=1))
        reg.compile(optimizer='adam', loss='mean_squared_error')
        reg.fit(X_train,
                y_train,
                batch_size=32,
                epochs=params['epochs'])
    else:
        st.warning('Select your choice of algorithm')

    return reg
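
A hypothetical call, assuming the module-level X_train / y_train (and X_test) arrays exist; the params keys are exactly the ones the function reads:

reg = get_classifier('RandomForest', {'n_estimators': 100})
preds = reg.predict(X_test)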
Example 3

def fitted_q_iteration(self,
                       four_tuples_set,
                       stop,
                       algo="Linear Regression"):
    N = 0
    print("Computing fitted q iteration ...")
    print("{} / 100".format((N / stop) * 100))
    while N < stop:  # temporary stopping condition
        N += 1
        X = []
        y = []
        for t in four_tuples_set:
            # t = (state, action, reward, next_state); the state is a 2-vector
            X.append([t[0][0], t[0][1], t[1]])
            if N == 1:
                y.append(t[2])
            else:
                # Bellman target: reward + discounted max over the two
                # candidate actions (-4 and 4) of the previous Q-estimate
                prev_q = self.qn_approximation[-1]
                q_left = prev_q.predict(
                    np.array([t[3][0], t[3][1], -4]).reshape(1, -1))[0]
                q_right = prev_q.predict(
                    np.array([t[3][0], t[3][1], 4]).reshape(1, -1))[0]
                y.append(t[2] + self.domain.d_factor * max(q_left, q_right))
        if algo == "Linear Regression":
            model = LinearRegression()
        elif algo == "Extremely Randomized Trees":
            model = ExtraTreesRegressor(n_estimators=10)
        elif algo == "Neural Network":
            X = np.array(X)
            y = np.array(y)
            model = Sequential()
            model.add(Dense(8, input_dim=3, activation='relu'))
            model.add(Dense(4, activation='relu'))
            model.add(Dense(1, activation='linear'))
            model.compile(loss='mean_squared_error',
                          optimizer='adam',
                          metrics=['mse'])
            # model.fit(X, y, epochs=10, batch_size=10, verbose=0)
            model.fit(X, y, epochs=100, batch_size=128, verbose=2)
            self.qn_approximation.append(model)
        if algo != "Neural Network":
            # sklearn's fit returns the estimator itself, so this stores the fitted model
            self.qn_approximation.append(
                model.fit(np.array(X), np.array(y)))
        print("{} / 100".format((N / stop) * 100))
    return self.qn_approximation[-1]
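
A hypothetical invocation, assuming an agent object exposing this method and a set of (state, action, reward, next_state) tuples:

q_hat = agent.fitted_q_iteration(four_tuples_set, stop=50,
                                 algo="Extremely Randomized Trees")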
Example 4
plt.ylabel("User1 Grade")
plt.show()

print("Fitting parameters: intercept", a, ",regression coefficient:", b)
print("Best Fitting Line: Y = ", round(a, 2), "+", round(b[0], 2), "* X")

import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

model = Sequential()

model.add(Dense(units=10, input_dim=1))
model.add(Activation('tanh'))
model.add(Dense(units=1))
model.add(Activation('tanh'))

sgd = SGD(learning_rate=0.3)  # 'lr' is the legacy argument name

model.compile(optimizer=sgd, loss='mse')

for step in range(3):  # a short demonstration run; with range(3), only step 0 triggers the print below
    cost = model.train_on_batch(user1.month, user1.grade)
    if step % 6 == 0:
        print('cost: ', cost)

W, b = model.layers[0].get_weights()
print('W:', W, ' b: ', b)
# In[ ]:


Example 5

Y = train2['LOS'].values
X = train2.drop(columns=['LOS'])
print("Total Dataset Shape - ", X.shape)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
print("Train Set shape - ", X_train.shape)
print("Test Set shape - ", X_test.shape)


# In[ ]:


model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))
#model.add(Dense(1024, activation='relu'))
#model.add(Dense(512, activation='relu'))
#model.add(Dense(1024, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(1)) # Output
opt = keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt,loss='mean_squared_error')
from keras.callbacks import EarlyStopping  # needed for the callback below
estp = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto', restore_best_weights=True)
model.fit(X_train, y_train, validation_split=0.15, shuffle=True, verbose=2, epochs=200, callbacks=[estp])  # shuffle takes a bool, not the string 'True'


# In[ ]:

Example 6
X = np.reshape(xs, (-1, 1))
y = np.reshape(ys, (-1, 1))

X_test = np.array([10, 20, -2])
y_test = np.array([19.0, 39, -5])

model.fit(X, y)

prediction = model.predict(np.reshape(X_test, (-1, 1)))

print("MAE:", mean_absolute_error(y_test, prediction))
print("Expected: [[-5.]]", "Got:", model.predict(np.reshape([-2], (-1, 1))))

# Tensorflow
print("\n##### USING TENSORFLOW")

model = Sequential()
model.add(Dense(units=1, input_shape=[1]))

# SGD: Stochastic Gradient Descent

model.compile(optimizer="sgd", loss="mean_squared_error")

model.fit(
    xs,
    ys,
    epochs=500,
)
print("Expected: [[19.]]", "Got:", model.predict([10.0]))
Example 7
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_scaled = scaler.fit_transform(dataset)

X = dataset_scaled[:, 0].reshape(-1, 1)  # Dense layers expect 2D input: (n_samples, n_features)
y = dataset_scaled[:, 1]

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)  # uncommented: X_train/X_test are used below

dataset_sz = X.shape[0]
train_sz = X_train.shape[0]
test_sz = X_test.shape[0]


regressor = Sequential()

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu', input_dim=1))
regressor.add(Dropout(.2))

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu'))
regressor.add(Dropout(.2))

regressor.add(Dense(units=500, kernel_initializer='uniform', activation='relu'))
regressor.add(Dropout(.2))

regressor.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))  # sigmoid output matches the MinMax-scaled target

regressor.compile(optimizer='adam', loss='mean_squared_error')

regressor.fit(X_train, y_train, batch_size=32, epochs=200)
Example 8
# use neural network
import keras
from keras.layers import Dense
from keras.models import Sequential
from keras import backend
from matplotlib import pyplot

def R2(y_true, y_pred):
    # coefficient of determination (R²) as a Keras backend metric
    ss_res = backend.mean(backend.square(y_pred - y_true), axis=-1)
    ss_tot = backend.mean(backend.square(backend.mean(y_true) - y_true), axis=-1)
    return 1 - ss_res / ss_tot

n_cols = X.shape[1]
input_shape = (n_cols,)
# Specify the model
model = Sequential()
model.add(Dense(350, activation='relu', input_shape = input_shape))
model.add(Dense(200, activation='relu'))
model.add(Dense(150, activation='relu'))
model.add(Dense(1))
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse', 'mae','mape', R2])
# train model
history = model.fit(train_features, train_labels, validation_data=(test_features,test_labels), epochs=1000, batch_size=1200)
# plot validation metrics (these history keys match older Keras; newer versions use the short names, e.g. 'val_mse')
pyplot.plot(history.history['val_mean_squared_error'])
pyplot.plot(history.history['val_mean_absolute_error'])
pyplot.plot(history.history['val_mean_absolute_percentage_error'])
pyplot.plot(history.history['val_R2'])
pyplot.show()

Example 9
model.fit(x_train, y_train)
new_preds = model.predict(x_test)

# Results
k_rms = np.sqrt(np.mean(np.power((np.array(y_test) - np.array(new_preds)), 2)))  # use the new_preds computed above
print(k_rms)

test['Predictions'] = 0
test['Predictions'] = new_preds

plt.plot(train['Adj. Close'])
plt.plot(test[['Adj. Close', 'Predictions']])

# Multilayer Perceptron
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(100, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
model.compile(optimizer='adam', loss='mean_squared_error')

X_train = np.array(x_train)
Y_train = np.array(y_train)

model.fit(X_train, Y_train, epochs=500)
preds = model.predict(x_test)

# Results
mlp_rms = np.sqrt(np.mean(np.power((np.array(y_test) - np.array(preds)), 2)))
print(mlp_rms)

test['Predictions'] = 0
Example 10

##Dumping
import joblib  # sklearn.externals.joblib has been removed; use the standalone joblib package
joblib.dump(regressor, 'regressor.sav')

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout

# Initialising the network (the original comments said "LSTM", but every layer
# below is Dense; this is a feed-forward MLP, not an RNN)
regressor = Sequential()

teste = X_train.reshape((1291, 216, 1))  # unused below; left over from an LSTM attempt
# Adding the first hidden layer
regressor.add(Dense(50, input_dim=86, activation='relu'))

# Adding a second hidden layer (input_dim is ignored after the first layer)
regressor.add(Dense(50, input_dim=20, activation='relu'))

# Adding a third hidden layer (softmax is an unusual choice for a regression hidden layer)
regressor.add(Dense(25, input_dim=20, activation='softmax'))

# Adding the output layer
regressor.add(Dense(units=1))

# Compiling the network
regressor.compile(optimizer='adam', loss='mean_squared_error')

# Fitting the network to the training set
history = regressor.fit(X_train, y_train, epochs=100, batch_size=32)
Example 11
classifier = XGBClassifier()
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)

ac = accuracy_score(y_test,y_pred)
print('XGBoost:',ac)
"""
# ANN
import keras
from keras.models import Sequential
from keras.layers import Dense

classifier = Sequential()
classifier.add(
    Dense(units=9,
          kernel_initializer='uniform',
          activation='relu',
          input_dim=19))
classifier.add(Dense(units=4, kernel_initializer='uniform', activation='relu'))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))  # sigmoid (not relu) pairs with binary_crossentropy
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
classifier.fit(X, y, batch_size=10, epochs=200)

#Confusion_Matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)

pid = test['PassengerId']
test = test.iloc[:, 1:].drop(['Name', 'Ticket', 'Cabin'], axis=1)
Example 12
data = data['2017-05-01':'2018-09-02']
data_train = data.copy()  # model on the pre-2014 data (original comment; the slice above is actually 2017-2018)
print("data_train", type(data_train))
data_mean = data_train.mean()
print("data_mean:", data_mean)
data_std = data_train.std()
print("data_std:", data_std)
data_train = (data_train - data_mean) / data_std  # standardize the data
x_train = data_train[feature].values  # feature matrix (.as_matrix() was removed from pandas)
y_train = data_train[u'AQI'].values  # label vector

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation  # keras.layers.core is the legacy import path

model = Sequential()  # build the model
model.add(Dense(12, input_dim=6))  # Dense(output_dim=...) is the legacy Keras 1 signature
model.add(Activation('relu'))  # using relu as the activation substantially improves accuracy
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')  # compile the model
model.fit(x_train, y_train, epochs=10000, batch_size=16)  # train for 10,000 epochs (nb_epoch is the legacy name)
model.save_weights(modelfile)  # save the model weights

# Predict, then invert the scaling.
x = ((data[feature] - data_mean[feature]) / data_std[feature]).values
data[u'AQI_pred'] = model.predict(x) * data_std[u'AQI'] + data_mean[u'AQI']
data.to_csv(pridictfile)
import matplotlib.pyplot as plt  # plot the predictions
p = data[[u'AQI', 'AQI_pred']].plot(subplots=True, style=['b-o', 'r-*'])
plt.show()
pridictDataToMysql(data)
Example 13
# The start of the windowing loop is missing from the snippet; a plausible
# reconstruction based on how X and Y are used below:
X, Y = [], []
for i in range(len(stock_final) - window_size):
    X.append(
        np.array(stock_final.iloc[i:i + window_size, :]).reshape(
            window_size * 4, 1))
    Y.append(np.array(stock.iloc[i + window_size, 4]).reshape(1, 1))
train_X, test_X, train_label, test_label = train_test_split(X,
                                                            Y,
                                                            test_size=0.1,
                                                            shuffle=False)
train_X = np.array(train_X)
test_X = np.array(test_X)
train_label = np.array(train_label)
test_label = np.array(test_label)
model = Sequential()
#add model layers

model.add(LSTM(128, return_sequences=True))
model.add(LSTM(64, return_sequences=False))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='RMSprop', loss='mse')
model.fit(train_X,
          train_label,
          validation_data=(test_X, test_label),
          epochs=50,
          shuffle=False)
print(model.evaluate(test_X, test_label))
# model.summary()
predicted = model.predict(test_X)
test_label = y_scaler.inverse_transform(test_label.reshape(-1, 1))  # inverse_transform expects a 2D array
predicted = np.array(predicted[:, 0]).reshape(-1, 1)
predicted = y_scaler.inverse_transform(predicted)
Example 14
#converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)

x_train, y_train = [], []
for i in range(60,len(train)):
    x_train.append(scaled_data[i-60:i,0])
    y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)

x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))

# create and fit the LSTM network
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(LSTM(units=50))
model.add(Dense(1))

model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=3, batch_size=1)

"""#### Validation
The number of prior datapoints used as input defines the one-dimensional (1D) subsequence of data that the LSTM will read and learn to extract features. We denote it here by `subsequence_length` which takes the past hour of data. We can also tune this hyperparameter using cross validation across multiple validatin datasets to arrive at an optimal value
"""

subsequence_length = 60 #past one hour
#predicting next values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - subsequence_length:].values
inputs = inputs.reshape(-1,1)
inputs  = scaler.transform(inputs)
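
The snippet stops before the test subsequences are assembled; a minimal completion sketch (an assumption, mirroring the training-window construction above):

X_test = []
for i in range(subsequence_length, inputs.shape[0]):
    X_test.append(inputs[i - subsequence_length:i, 0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

predicted = model.predict(X_test)
predicted = scaler.inverse_transform(predicted)  # back to the original scale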
Example 15
    # X.append(np.array(temp).reshape(50, 1))
    # Y.append(np.array(temp2).reshape(1,1))
train_X,test_X,train_label,test_label = train_test_split(X, Y, test_size=0.1,shuffle=False)
len_t = len(train_X)
# train_X,valid_X,train_label,valid_label = train_test_split(train_X, train_label, test_size=0.2,shuffle=True)
train_X = np.array(train_X)
test_X = np.array(test_X)
train_label = np.array(train_label)
test_label = np.array(test_label)
# valid_label = np.array(valid_label)
# valid_X = np.array(valid_X)
train_X = train_X.reshape(train_X.shape[0],7,50,1)
test_X = test_X.reshape(test_X.shape[0],7,50,1)
model = Sequential()
#add model layers
model.add(TimeDistributed(Conv1D(128, kernel_size=1, activation='relu'), input_shape=(None, 50, 1)))  # input_shape belongs on the TimeDistributed wrapper, not the inner Conv1D
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(256, kernel_size=1, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Conv1D(512, kernel_size=1, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2)))
model.add(TimeDistributed(Flatten()))
model.add(Bidirectional(LSTM(200,return_sequences=True)))
model.add(Dropout(0.25))
model.add(Bidirectional(LSTM(200,return_sequences=False)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='linear'))
model.compile(optimizer='adam', loss='mse')
model.fit(train_X, train_label, validation_data=(test_X,test_label), epochs=200)
print(model.summary())
print(model.evaluate(test_X,test_label))
Example 16
from sklearn.linear_model import Ridge
model = Ridge()
model.fit(x_train, y_train)

from sklearn.linear_model import ElasticNet
model = ElasticNet()
model.fit(x_train, y_train)

import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import optimizers

sgd = optimizers.SGD(learning_rate=0.1, decay=1e-6, momentum=0.9, nesterov=True)  # defined but unused: compile below uses 'adam'

model = Sequential()
model.add(Dense(units=3, activation='relu', input_shape=(3, )))

model.add(Dense(units=300, activation='relu'))
model.add(Dense(units=300, activation='relu'))

model.add(Dense(units=1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # note: sigmoid/BCE suit a binary target; the inverse-scaling below suggests a regression target, where a linear output and 'mse' would fit better
              metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=32, epochs=1000)

y_pred = model.predict(X_test)
y_pred_D = sc_y.inverse_transform(y_pred)

score = model.evaluate(X_test, y_test)  # was Y_test, which is not defined in this snippet
Example 17
def model_predictions(modeloption, x_train, y_train, x_test, y_test):

    if modeloption == 'lineareg':
        model = LinearRegression()
        model.fit(x_train, y_train)  # Training
        y_predicted = model.predict(x_test)  # Test

    elif modeloption == 'ridge':
        model = RidgeCV(alphas=ALPHAS)
        model.fit(x_train, y_train)
        print(model.alpha_)
        y_predicted = model.predict(x_test)

    elif modeloption == 'mlp':
        # Build Keras model
        model = Sequential()
        """
        #model.add(keras.layers.Dropout(0.2, input_shape=(x_train.shape[1],)))
        model.add(Dense(NEURONSPERLAYER, input_shape =(x_train.shape[1],)))
        model.add(Activation('relu'))
        #model.add(keras.layers.Dropout(0.2))
        #model.add(Dense(NEURONSPERLAYER))
        #model.add(Activation('relu'))
        #model.add(keras.layers.Dropout(0.2))
        model.add(Dense(NEURONSOUTPUT))
        model.add(Activation('linear'))
        """
        #trial11
        #model.add(keras.layers.Dropout(0.3, input_shape=(x_train.shape[1],)))
        model.add(
            Dense(NEURONSPERLAYER,
                  activation='sigmoid',
                  input_shape=(x_train.shape[1], )))
        model.add(Dense(1000, activation=None))
        model.compile(loss='mean_squared_error', optimizer=OPTIMIZER)

        history = model.fit(x_train,
                            y_train,
                            epochs=EPOCHS,
                            batch_size=BATCH,
                            verbose=0)
        y_predicted = model.predict(x_test,
                                    batch_size=BATCH,
                                    verbose=0,
                                    steps=None)

        # show training loss and test loss
        print(history.history['loss'])
        print(model.evaluate(x_test, y_test, batch_size=BATCH, verbose=0))

    elif modeloption == 'knn':
        model = KNeighborsRegressor(n_neighbors=NEIGHBORS, weights='distance')
        model.fit(x_train, y_train)
        y_predicted = model.predict(x_test)

    elif modeloption == 'kernelreg':
        model = KernelRidge(kernel=KERNEL,
                            degree=DEGREE,
                            alpha=0.001,
                            coef0=10)
        model.fit(x_train, y_train)
        y_predicted = model.predict(x_test)

    else:
        raise ValueError('Unknown modeloption: {}'.format(modeloption))  # avoid returning an undefined y_predicted

    return y_predicted
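
A hypothetical call; ALPHAS, NEURONSPERLAYER, OPTIMIZER, EPOCHS, BATCH, NEIGHBORS, KERNEL and DEGREE are module-level constants assumed to be defined elsewhere:

y_predicted = model_predictions('ridge', x_train, y_train, x_test, y_test)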
Example 18
path = os.path.join(os.getcwd(), 'izeqi', 'data', 'data_time.txt')  # os.path.join avoids fragile backslash escapes
data2 = pd.read_csv(path, header='infer', index_col=0)
datatime = data2.values
x_train_time1 = datatime[:, 0:input_size_1].astype(float)
y_train_time1 = datatime[:, input_size_1:]
scalertime_x = preprocessing.StandardScaler().fit(x_train_time1)
scalertime_y = preprocessing.StandardScaler().fit(y_train_time1)
x_train_time = scalertime_x.transform(x_train_time1)
y_train_time = scalertime_y.transform(y_train_time1)


model_time = LinearRegression()
model_time.fit(x_train_time, y_train_time)

'''
model_time=Sequential()
model_time.add(Dense(input_dim=1,kernel_initializer='random_uniform',
                bias_initializer='random_uniform',units=20))
model_time.add(keras.layers.advanced_activations.ELU(alpha=2))
model_time.add(Dropout(0.2))
model_time.add(Dense(1))

model_time.compile(optimizer='sgd',
              loss='mse')'''



#second model to calculate the price
model_value = Sequential()
#random init and zero bias; the input layer has 4 units and the first hidden layer 17 units
Example 19
x = np.reshape(x, (-1, 1))
model = LinearRegression()
model.fit(x, y)
pred_y = model.predict(x)

plt.title("Simple Linear Regression Model")
plt.plot(x, y, '.',label="Origin")
plt.plot(x, pred_y,'.',label="Model")
plt.legend()
plt.xlabel("Key")
plt.ylabel("Pred_Pos = CDF(Key)")
plt.show()


model = Sequential()
model.add(Dense(8, input_dim=1, activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(1))

sgd = keras.optimizers.SGD(learning_rate=0.0001)  # learning rate, a tunable parameter ('lr' is the legacy name)
model.compile(loss="mse", optimizer=sgd, metrics=["mse"])
model.fit(norm_x, y, epochs=1000, batch_size=32, verbose=0)  # norm_x: training data, y: training targets
pred_y = model.predict(norm_x)

plt.title("Neural Network 8x8 Model")
plt.plot(x, y, '.', label="Origin")
plt.plot(x, pred_y, '.', label="Model")
plt.legend()
plt.xlabel("Key")
plt.ylabel("Pred_Pos = CDF(Key)")
plt.show()
Example 20
def modelling(X_train,
              y_train,
              X_test,
              y_test,
              fs='lasso',
              method='ols',
              select=500):

    if method == 'ols':
        from sklearn.linear_model import LinearRegression
        mod = LinearRegression().fit(X_train, y_train)

    if method == 'elasticNet':
        from sklearn.linear_model import ElasticNetCV
        mod = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],
                           cv=10,
                           tol=0.001,
                           n_jobs=7)
        mod.fit(X_train, y_train)

    if method == 'xgboost':
        import xgboost as xg
        max_depth = 3
        min_child_weight = 10
        subsample = 0.5
        colsample_bytree = 0.6
        objective = 'reg:squarederror'  # 'reg:linear' is the deprecated alias
        num_estimators = 1000
        learning_rate = 0.3

        mod = xg.XGBRegressor(max_depth=max_depth,
                              min_child_weight=min_child_weight,
                              subsample=subsample,
                              colsample_bytree=colsample_bytree,
                              objective=objective,
                              n_estimators=num_estimators,
                              learning_rate=learning_rate)
        mod.fit(X_train, y_train)

        # implement CV

    if method == 'nn':
        from sklearn.preprocessing import StandardScaler
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.callbacks import EarlyStopping
        from keras.callbacks import ModelCheckpoint
        from keras.models import load_model

        mod = Sequential()
        # input layer
        mod.add(Dense(50, activation='relu', input_shape=(int(select), )))
        # hidden layer
        mod.add(Dense(50, activation='relu'))
        # output layer
        mod.add(Dense(1, activation='linear'))
        # mod.summary()
        mod.compile(loss='mse', optimizer='adam', metrics=['accuracy'])  # note: accuracy is not a meaningful metric for regression

        # patient early stopping and select best model (not always the last)
        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=1,
                           patience=200)
        mc = ModelCheckpoint('best_model.h5',
                             monitor='val_acc',
                             mode='max',
                             verbose=1,
                             save_best_only=True)  # defined but never passed to fit's callbacks

        history = mod.fit(X_train,
                          y_train,
                          epochs=1000,
                          batch_size=25,
                          verbose=1,
                          validation_data=(X_test, y_test),
                          callbacks=[es])

    # pickle.dump(mod, open('models/' + fs + '_' + method + '_' +
    #                        select + '.sav', 'wb'))

    if method == 'nn':
        rmse = (sum((np.concatenate(mod.predict(X_test)) - y_test)**2) /
                y_test.size)**(.5)
    else:
        rmse = (sum((mod.predict(X_test) - y_test)**2) / y_test.size)**(.5)

    return mod, rmse
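
A hypothetical call of the helper above, matching its default of 500 selected features:

mod, rmse = modelling(X_train, y_train, X_test, y_test,
                      fs='lasso', method='elasticNet', select=500)
print('test RMSE:', rmse)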
Example 21

print(y_test_predict.max())

plt.scatter(y_test, y_test_predict)
plt.show()

#MSE OF THE TESTING SET
mse_test = np.mean(np.square(np.subtract(y_test, y_test_predict)))
print("Testing set Mean Squared Error: {}".format(mse_test))
"""CNN"""

model = Sequential()

# The Input Layer :
model.add(
    Dense(4096,
          kernel_initializer='normal',
          input_dim=X_train.shape[1],
          activation='relu'))

# The Hidden Layers :
'''
NN_model.add(Dense(2048, kernel_initializer='normal',activation='relu'))
NN_model.add(Dense(1024, kernel_initializer='normal',activation='relu'))
'''
model.add(Dense(512, kernel_initializer='normal', activation='relu'))
model.add(Dense(256, kernel_initializer='normal', activation='relu'))

# The Output Layer :
model.add(Dense(1, kernel_initializer='normal', activation='linear'))

# Compile the network :
Example 22
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasRegressor  # unused below; note this mixes standalone keras with tensorflow.keras
from sklearn.model_selection import cross_val_score, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline

X = data.drop(columns=['probability', 'Patient_ID', 'Health_Camp_ID'])
y = data['probability']

X = np.array(X[['Category1', 'Var1', 'Category2', 'Var5', 'LinkedIn_Shared']])
y = np.array(y)

model = keras.Sequential()
model.add(Dense(10, input_dim=5, activation="relu"))
model.add(Dense(8, activation="relu"))
model.add(Dense(3, activation="relu"))
model.add(Dense(1, activation='sigmoid'))

model.compile(loss='mean_squared_error', optimizer='adam')
# model.summary()

callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5)
hist = model.fit(X, y, epochs=50, batch_size=1024,  callbacks=[callback], verbose=0)


# # 7. Lasso Regression

from sklearn import linear_model
Example 23

# predicting the test set result
y_pred = regressor.predict(testdata)
y_pred = pd.DataFrame(y_pred)
y_pred.rename(columns={0: 'Price'}, inplace=True)
y_pred.to_excel("forestx.xlsx", index=False)

# Importing the Keras libraries and packages
# import keras
from keras.models import Sequential
from keras.layers import Dense

# Initialising the ANN
regressor = Sequential()

# Adding the input layer and the first hidden layer
# (output_dim/init are the legacy Keras 1 names for units/kernel_initializer)
regressor.add(
    Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=12))

# Adding the second hidden layer
regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
# Adding the third hidden layer
regressor.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))

# Adding the output layer
regressor.add(Dense(units=1, kernel_initializer='uniform', activation='linear'))

# Compiling the ANN
regressor.compile(optimizer='adam',
                  loss='mean_absolute_error',
                  metrics=['mean_absolute_error'])

# Fitting the ANN to the Training set
Example 24
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)

x_train, y_train = [], []
for i in range(60,len(train)):
    x_train.append(scaled_data[i-60:i,0])
    y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)

x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))

#check for best units
myRMS = []
for p in range(40, 60):  # sweep LSTM unit counts
    model = Sequential()
    model.add(LSTM(units=p, return_sequences=True, input_shape=(x_train.shape[1],1)))
    model.add(LSTM(units=p))
    model.add(Dense(1))
    
    model.compile(loss='mean_squared_error', optimizer='adam')
    model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=2)
    
    #predicting values, using past 60 from the train data
    inputs = new_data[len(new_data) - len(valid) - 60:].values
    inputs = inputs.reshape(-1,1)
    inputs  = scaler.transform(inputs)
    
    X_test = []
    for i in range(60,inputs.shape[0]):
        X_test.append(inputs[i-60:i,0])
    X_test = np.array(X_test)
Example 25
#Neural Network
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout  # keras.layers.core is the legacy import path
from keras.callbacks import EarlyStopping
from keras.optimizers import RMSprop, SGD
from keras.layers import Conv1D, MaxPooling1D
from sklearn.metrics import roc_curve, auc, roc_auc_score
import matplotlib.pyplot as plt

model = Sequential()
model.add(Dense(100, activation='sigmoid', use_bias=True))
model.add(Dense(100, activation='sigmoid', use_bias=True))
model.add(Dense(10, activation='sigmoid', use_bias=True))
model.add(Dense(10, activation='sigmoid', use_bias=True))  # use_bias must be a bool (was 2)
model.add(Dense(5, activation='sigmoid', use_bias=True))  # use_bias must be a bool (was 10)
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])  # a 3-way softmax output calls for a categorical loss, not binary_crossentropy
early_stopping = EarlyStopping(monitor='loss', patience=2, mode='min')  # patience must be an integer (was 1.99)
history = model.fit(X_train, y_train, batch_size=64, epochs=100, verbose=1, validation_data=None, callbacks=[early_stopping])

from keras.models import model_from_json

model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)