def train(self, models, x_train, y_train, x_test, y_test):
        # Stack the base models' predictions as meta-features for a linear blender
        # (assumes each model's predict() returns a 2-D (n_samples, 1) array,
        # so hstack produces one column per model)
        x_train_predictions = np.hstack([m.predict(x_train) for m in models])
        x_test_predictions = np.hstack([m.predict(x_test) for m in models])

        # note: the normalize= parameter was removed from scikit-learn's
        # LinearRegression in 1.2; this code targets an older release
        lr = LinearRegression(normalize=True)
        lr.fit(x_train_predictions, y_train)
        y_pred_lr = lr.predict(x_test_predictions)
        print_regression_model_summary("LR", y_test, y_pred_lr,
                                       ParmsFromNormalization(mean=0, std=1, sqrtx2=1)())
        self.model = lr
        return lr
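# For reference, a minimal self-contained sketch of the same stacking idea;
# the two scikit-learn base models and the synthetic data below are
# illustrative assumptions, not part of the original code.
import numpy as np
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression

rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = X @ np.array([1.5, -2.0, 0.7]) + 0.1 * rng.randn(200)
X_tr, X_te, y_tr, y_te = X[:150], X[150:], y[:150], y[150:]

base_models = [
    RandomForestRegressor(n_estimators=50, random_state=0).fit(X_tr, y_tr),
    GradientBoostingRegressor(random_state=0).fit(X_tr, y_tr),
]

# column_stack (rather than hstack) because sklearn predict() returns 1-D arrays
meta_train = np.column_stack([m.predict(X_tr) for m in base_models])
meta_test = np.column_stack([m.predict(X_te) for m in base_models])

blender = LinearRegression().fit(meta_train, y_tr)
print("blended R^2:", blender.score(meta_test, y_te))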
model.add(
    LSTM(input_dim=layers[0],
         output_dim=layers[1],
         return_sequences=True,
         W_regularizer=l2(rgw),
         U_regularizer=l2(rgw)))
model.add(Dropout(dropout))

model.add(
    LSTM(layers[2],
         return_sequences=False,
         W_regularizer=l2(rgw),
         U_regularizer=l2(rgw)))
model.add(Dropout(dropout))

model.add(Dense(output_dim=layers[3]))
model.add(Activation("linear"))

model.compile(loss="mse", optimizer="rmsprop")

print("compiltion done")

model.fit(X_train,
          y_train,
          batch_size=512,
          nb_epoch=epochs,
          validation_split=0.05)
Y_predicted = model.predict(X_test)
print("Y_predicted shape", Y_predicted.shape)

print_regression_model_summary("RNN", y_test, Y_predicted,
                               parmsFromNormalization)
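# The model above uses the old Keras 0.x/1.x API (input_dim/output_dim,
# W_regularizer/U_regularizer, nb_epoch). A rough, self-contained tf.keras
# equivalent is sketched below; the layer sizes and the rgw/dropout values
# mirror the snippet above, everything else is an assumption.
from tensorflow import keras
from tensorflow.keras.layers import LSTM, Dropout, Dense
from tensorflow.keras.regularizers import l2

def build_lstm(layers, rgw, dropout):
    # W_regularizer -> kernel_regularizer, U_regularizer -> recurrent_regularizer
    model = keras.Sequential([
        LSTM(layers[1], input_shape=(None, layers[0]), return_sequences=True,
             kernel_regularizer=l2(rgw), recurrent_regularizer=l2(rgw)),
        Dropout(dropout),
        LSTM(layers[2], return_sequences=False,
             kernel_regularizer=l2(rgw), recurrent_regularizer=l2(rgw)),
        Dropout(dropout),
        Dense(layers[3], activation="linear"),
    ])
    model.compile(loss="mse", optimizer="rmsprop")
    return model

# fitting works as before, with nb_epoch renamed to epochs:
# model.fit(X_train, y_train, batch_size=512, epochs=epochs, validation_split=0.05)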
#X_all = np.column_stack((cycle_data, zscore_vals, entropy_vals, mavg1_vals, mavg2_vals, mavg4_vals, mavg8_vals, mavg16_vals))

X_all, Y_all = shuffle_data(X_all, Y_all)

# note: project-local train_test_split helper (the split size comes first),
# not sklearn's train_test_split
X_train, X_test, y_train, y_test = train_test_split(training_set_size, X_all, Y_all)

print("normalizing_factor", normalizing_factor)
#run_timeseries_froecasts(X_train, y_train, X_test, y_test, window_size, epoch_count=10, parmsFromNormalization=parmsFromNormalization)


configs = [
    #lr=0.01
    MLConfigs(nodes_in_layer=20, number_of_hidden_layers=3, dropout=0, activation_fn='relu', loss="mse",
              epoch_count=200, optimizer=Adam(lr=0.001)),
    #MLConfigs(nodes_in_layer=20, number_of_hidden_layers=3, dropout=0, activation_fn='relu', loss="mse",
    #          epoch_count=200, optimizer=Adam(lr=0.001), regularization=0.005),
    ]

#configs = create_rondomsearch_configs4DL((1,2,3), (5,10,15,20), (0, 0.1, 0.2, 0.4),
#                                        (0, 0.01, 0.001), (0.01, 0.001, 0.0001), 50)

index = 0
for c in configs:
    c.epoch_count = 200
    #c.nodes_in_layer = c.nodes_in_layer/(1-c.dropout)
    y_pred_dl = regression_with_dl(X_train, y_train, X_test, y_test, c)
    print ">> %d %s" %(index, str(c.tostr()))
    print_regression_model_summary("DL", y_test, y_pred_dl, parmsFromNormalization)
    index = index + 1

#    y_pred_dl = regression_with_dl(X_train, y_train, X_test, y_test, c)
#    print_regression_model_summary("DL" + str(c.tostr()), y_test, y_pred_dl)

size = 100000
x = np.random.zipf(2, size)        # heavy-tailed feature
y = np.random.normal(10, 1, size)  # Gaussian feature
xy = [x[i] * y[i] for i in range(size)]                          # interaction term
xbyy = [x[i] / y[i] if y[i] != 0 else 1 for i in range(size)]    # ratio term

# note: * and / bind equally tightly left-to-right, so this computes
# (2/3) * (2*x + y) * y, not 2*(2*x + y) / (3*y)
target = [2 * (2 * x[i] + y[i]) / 3 * y[i] for i in range(size)]

train_set_size = int(size * 0.7)

X_all, Y_all = normlaize_data(np.column_stack((x, y, xy, xbyy)),
                              np.array(target))
X_all, Y_all = shuffle_data(X_all, Y_all)

X_train, X_test, y_train, y_test = train_test_split(train_set_size, X_all,
                                                    Y_all)

c = MLConfigs(nodes_in_layer=4,
              number_of_hidden_layers=3,
              dropout=0,
              activation_fn='relu',
              loss="mse",
              epoch_count=15,
              optimizer=Adam())
y_pred_dl = regression_with_dl(X_train, y_train, X_test, y_test, c)
print_regression_model_summary("DL" + str(c.tostr()), y_test, y_pred_dl)
def combine_models(models, y_test):
    # `models` is assumed to hold one row per test sample and one column per
    # base model; combine the per-sample forecasts by median and by mean
    combined_forecast = [np.median(models[i]) for i in range(models.shape[0])]
    print_regression_model_summary("CombinedMedian", y_test, combined_forecast)

    combined_forecast = [np.mean(models[i]) for i in range(models.shape[0])]
    print_regression_model_summary("CombinedMean", y_test, combined_forecast)
def normlaize_data(X_all, Y_all):
    # L2-normalize the feature matrix row-wise; preprocessing.normalize treats
    # the 1-D target as a single row, so [0] unwraps it back to 1-D
    return preprocessing.normalize(X_all.astype("float32"), norm='l2'), \
           preprocessing.normalize(Y_all.astype("float32"), norm='l2')[0]


