Example #1
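This example trains a 1D convolutional network on lookback windows of the series returned by clean_grand(), then forecasts at test time by recursively feeding each one-step prediction back into the input window until the lookforwardlength horizon is reached.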
def cnn_model(lookbacklength, lookforwardlength, test_split, batchsize,
              nb_epochs, shuffle):
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand_recurrent(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)
    input_shape = X_train.shape[1:]
    
    model = Sequential()
    #conv-pool 1
    model.add(Conv1D(32, kernel_size=3, input_shape=input_shape))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    #conv-pool 2 (input_shape is only needed on the first layer)
    model.add(Conv1D(16, kernel_size=3))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    #dense head
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(1))
    
    model.compile(loss='mse',
                  optimizer="adam")
    
    history = model.fit(X_train, y_train,
              batch_size=batchsize,
              epochs=nb_epochs,
              verbose=1,
              shuffle=shuffle)
    y_pred = []
    print("Total Number of Test Samples: {0}".format(len(X_test)))
    for i in range(len(X_test)):
        #indexing with np.newaxis preserves the (1, lookback, 1) shape
        test = X_test[np.newaxis, i]
        pred = model.predict(test)
        if i % 1000 == 0:
            print(i)
        #recursive rollout: drop the oldest value, append the latest
        #prediction, and predict again until the forecast horizon is reached
        for j in range(lookforwardlength):
            test = np.append(test[0, 1:, 0], pred[0, 0]).reshape(1, -1, 1)
            pred = model.predict(test)
        y_pred.append(pred)
    
    y_pred = np.array(y_pred)

    save_keras_model(model, 'R_cnn')

    save_np_array('r_cnn_pred', y_pred)

    error_reporting_regression(y_test, y_pred)

    error_histogram(y_test, y_pred)

    error_time_series(y_test, y_pred)

    keras_training_loss_curve(history)
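A minimal invocation sketch; the hyperparameter values below are illustrative assumptions, not settings taken from the original project:

cnn_model(lookbacklength=1000,
          lookforwardlength=200,
          test_split=0.2,
          batchsize=128,
          nb_epochs=10,
          shuffle=True)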
Example #2
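A linear-regression baseline: the same lookback windows, squeezed down to 2-D, fed to scikit-learn's LinearRegression.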
def linear_model(lookbacklength, lookforwardlength, test_split):
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)
    X_train, X_test = np.squeeze(X_train), np.squeeze(X_test)
    clf = LinearRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    save_sklearn_model(clf, 'LinearRegressor')

    error_reporting_regression(y_test, y_pred)

    error_histogram(y_test, y_pred)

    error_time_series(y_test, y_pred)
Example #3
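The same pipeline with a RandomForestRegressor; n_decision_trees sets the size of the ensemble.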
def rf_model(lookbacklength, lookforwardlength, test_split, n_decision_trees):
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)
    X_train, X_test = np.squeeze(X_train), np.squeeze(X_test)
    rf = RandomForestRegressor(n_estimators=n_decision_trees)
    rf.fit(X_train, y_train)
    y_pred = rf.predict(X_test)

    save_sklearn_model(rf, 'RandomForest')

    error_reporting_regression(y_test, y_pred)

    error_histogram(y_test, y_pred)

    error_time_series(y_test, y_pred)
Example #4
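An LSTM variant that predicts the target at the forecast horizon directly, in a single forward pass, rather than rolling one-step predictions forward as in Example #1.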
def lstm_model(lookbacklength, lookforwardlength, test_split, batchsize,
               nb_epochs, shuffle):
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)
    input_shape = X_train.shape[1:]

    model = Sequential()
    model.add(LSTM(15, input_shape=input_shape))
    model.add(LeakyReLU())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(32))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(1))
    
    model.compile(loss='mse',
                  optimizer="adam")
    
    history = model.fit(X_train, y_train,
              batch_size=batchsize,
              epochs=nb_epochs,
              verbose=1,
              shuffle=shuffle)
    
    y_pred = model.predict(X_test)

    save_keras_model(model, 'lstm')

    error_reporting_regression(y_test, y_pred)

    error_histogram(y_test, y_pred)

    error_time_series(y_test, y_pred)

    keras_training_loss_curve(history)
Example #5
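A stub for an FFT-based model; only the data preparation is implemented.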
def fft_model(lookbacklength, lookforwardlength, test_split):
    #data preparation only; the FFT-based model itself is left unimplemented
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)
Example #6
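A quick inspection of the frequency content of the temperature series.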
def just_trying_fft(n=100):
    df = clean_grand()
    temp = df['temp'].values
    #real-input FFT of the temperature series, truncated/padded to n samples
    spectra = np.fft.rfft(temp, n=n)
    #rfft returns complex coefficients, so plot the magnitude
    plt.plot(np.abs(spectra))
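To read the spectrum on a physical frequency axis, np.fft.rfftfreq gives the frequency of each rfft bin. A self-contained sketch, with a synthetic stand-in for the temperature series and an assumed one-sample-per-minute logging interval:

import numpy as np
import matplotlib.pyplot as plt

#synthetic stand-in for df['temp'].values, just to make the sketch runnable
t = np.arange(10000)
temp = 60 + 10 * np.sin(2 * np.pi * t / 90)

n = 100
spectra = np.fft.rfft(temp, n=n)
#d is the sampling interval in seconds (assumed: one sample per minute)
freqs = np.fft.rfftfreq(n, d=60.0)
plt.plot(freqs, np.abs(spectra))
plt.xlabel('frequency (Hz)')
plt.ylabel('|amplitude|')
plt.show()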
Example #7
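A walk-forward ARIMA(5, 1, 0) forecast: the model is refit on all of the history at each test step and predicts one step ahead.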
import sys

import pandas as pd
from scipy.fftpack import rfft
from scipy import optimize
from statsmodels.tsa.arima_model import ARIMA
#plotting
import matplotlib.pyplot as plt
#home-made
sys.path.append('../../utils')
from preprocessing import temp_forecasting_shape_processing, test_train_split
from error_reporting import error_reporting_regression, error_histogram, error_time_series, keras_training_loss_curve
from helpers import save_model, load_tsv

sys.path.append('../data_cleaning')
from grand import process_grand, clean_grand

df = clean_grand()
series = df['temp'].values
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
residuals = pd.DataFrame(model_fit.resid)
#error_reporting_regression
X = series
size = int(len(X) * 0.999)
train, test = X[0:size], X[size:len(X)]
history = [x for x in train]
predictions = list()
lookforwardlength = 1
for t in range(len(test)):
    #refit on the full history at every step (walk-forward validation)
    model = ARIMA(history, order=(5, 1, 0))
    model_fit = model.fit(disp=0)
    #forecast() returns (values, stderr, conf_int); keep the last forecast step
    output = model_fit.forecast(steps=lookforwardlength)
    predictions.append(output[0][-1])
    #advance the window with the observed value, not the prediction
    history.append(test[t])
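Refitting at every step avoids look-ahead bias at the cost of one full ARIMA fit per test sample. The rolling predictions can then be scored the same way as the other models, e.g.:

error_reporting_regression(test, predictions)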
Example #8
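A deeper 1D CNN with four conv-pool stages and shrinking kernel sizes; unlike Example #1, it predicts the target directly rather than via a recursive rollout.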
def cnn_model(lookbacklength, lookforwardlength, test_split, batchsize,
              nb_epochs, shuffle):
    df = clean_grand()
    X_train, X_test, y_train, y_test = process_grand(
        df,
        lookbacklength=lookbacklength,
        lookforwardlength=lookforwardlength,
        test_split=test_split)

    input_shape = X_train.shape[1:]

    model = Sequential()
    #conv-pool 1
    model.add(Conv1D(16, kernel_size=24, input_shape=input_shape))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    #conv-pool 2
    model.add(Conv1D(16, kernel_size=12))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    #conv-pool 3
    model.add(Conv1D(16, kernel_size=6))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    #conv-pool 4
    model.add(Conv1D(16, kernel_size=3))
    model.add(LeakyReLU())
    model.add(MaxPooling1D(pool_size=3))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(32))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(1))

    model.compile(loss='mse', optimizer="adam")

    history = model.fit(X_train,
                        y_train,
                        batch_size=batchsize,
                        epochs=nb_epochs,
                        verbose=1,
                        shuffle=shuffle)

    y_pred = model.predict(X_test)

    save_keras_model(model, 'cnn')

    error_reporting_regression(y_test, y_pred)

    error_histogram(y_test, y_pred)

    error_time_series(y_test, y_pred)

    keras_training_loss_curve(history)