Example #1
File: main.py Project: fripSide/Main
def run_dbn():
    X_train, Y_train, X_test, Y_test = normalize_data()
    regressor = SupervisedDBNRegression(hidden_layers_structure=[100],
                                        learning_rate_rbm=0.01,
                                        learning_rate=0.01,
                                        n_epochs_rbm=20,
                                        n_iter_backprop=200,
                                        batch_size=16,
                                        activation_function='relu')
    # Y_train = np.reshape(Y_train, (79, 2))
    print(X_train.shape, Y_train.shape)

    regressor.fit(X_train, Y_train)

    # Test
    Y_pred = regressor.predict(X_test)
    # Drop the last column of predictions and targets before scoring
    Y_pred = np.delete(Y_pred, np.s_[-1:], axis=1)
    Y_test = np.delete(Y_test, np.s_[-1:], axis=1)
    print("Y_pred", Y_pred)
    print("Y_test", Y_test)
    print('Done.\nR-squared: %f\nMSE: %f' %
          (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred)))
    # print("mean_absolute_error", mean_absolute_error(Y_test, Y_pred) / Y_test.shape[0])
    # print("len", Y_test.shape[0])
    # Mean absolute error over the first 13 test samples
    abs_sum = 0
    for i in range(0, 13):
        v1 = Y_pred[i][0]
        v2 = Y_test[i][0]
        print(v1, v2)
        abs_sum += abs(v1 - v2)
    print("final: ", abs_sum, abs_sum / 13)
Example #2
def train_model(learning_rate_rbm, learning_rate, batch_size, x_train, y_train, x_test):
    path_DBN = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), "models"), "deep-belief-network")
    sys.path.append(path_DBN)
    from dbn.tensorflow import SupervisedDBNRegression

    regressor_DBN = SupervisedDBNRegression(learning_rate_rbm=learning_rate_rbm, learning_rate=learning_rate,
                                            batch_size=batch_size, verbose=False)
    regressor_DBN.fit(x_train, y_train)
    pred = regressor_DBN.predict(x_test)
    return pred
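
A possible call site, with illustrative random arrays standing in for real data:

import numpy as np

x_train = np.random.rand(100, 10)   # placeholder data; shapes are illustrative
y_train = np.random.rand(100, 1)
x_test = np.random.rand(20, 10)
pred = train_model(learning_rate_rbm=0.01, learning_rate=0.01, batch_size=16,
                   x_train=x_train, y_train=y_train, x_test=x_test)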
Example #3
    def update_mse(tmp_input_element, tmp_list):
        data_train, label_train, data_test, label_test = \
            HSMemory.create_train_and_test_data(tmp_list, tmp_input_element.number_visible_input)
        tmp_regression = SupervisedDBNRegression(
            hidden_layers_structure=[
                tmp_input_element.number_visible_input,
                tmp_input_element.number_hidden_input
            ],
            learning_rate_rbm=tmp_input_element.learning_rate_rbm,
            learning_rate=tmp_input_element.learning_rate,
            n_epochs_rbm=tmp_input_element.n_epochs_rbm,
            n_iter_backprop=tmp_input_element.n_iter_back_prop,
            contrastive_divergence_iter=tmp_input_element.contrastive_divergence_iter,
            batch_size=tmp_input_element.batch_size,
            activation_function=tmp_input_element.activation_function,
            n_hidden_layers_mlp=tmp_input_element.n_hidden_layers_mlp,
            cost_function_name=tmp_input_element.cost_function_name)

        tmp_regression.fit(data_train, label_train)  # train data
        tmp_input_element.train_mse = sum(
            tmp_regression.train_loss) / HSElement.config_n_iter_back_prop

        y_pred_test = tmp_regression.predict(data_test)
        check_nan = np.isnan(y_pred_test).any()

        # NaN predictions or non-finite training loss get a large sentinel MSE
        if check_nan:
            tmp_input_element.test_mse = 1000
        else:
            tmp_input_element.test_mse = mean_squared_error(
                label_test, y_pred_test)
        if np.isnan(tmp_input_element.train_mse) or np.isinf(
                tmp_input_element.train_mse):
            tmp_input_element.train_mse = 1000

        # add to export result
        tmp_result_data = [
            tmp_input_element.learning_rate_rbm,
            tmp_input_element.learning_rate,
            tmp_input_element.number_visible_input,
            tmp_input_element.number_hidden_input, tmp_input_element.train_mse,
            tmp_input_element.test_mse, '', '', '', '', '', '', '', ''
        ]
        TensorGlobal.followHs.append(tmp_result_data)

        TensorGlobal.sessFlg = True
        tf.reset_default_graph()
        del tmp_regression
        return tmp_input_element
Example #4
def create_random_model():
    # tmp_learning_rate_rbm = 0.001 + uniform(0, 1) * (0.5 - 0.001)  # lorenz
    RandomRegression.tmp_learning_rate_rbm = 0.005 + uniform(0, 1) * (
        0.2 - 0.005)  # random
    # tmp_learning_rate = 0.01 + uniform(0, 1) * (0.9 - 0.01)  # lorenz
    RandomRegression.tmp_learning_rate = 0.0008 + uniform(0, 1) * (
        0.08 - 0.0008)  # random
    tmp_regressor = SupervisedDBNRegression(
        hidden_layers_structure=[
            RandomRegression.number_visible_input,
            RandomRegression.number_hidden_input
        ],
        learning_rate_rbm=RandomRegression.tmp_learning_rate_rbm,
        learning_rate=RandomRegression.tmp_learning_rate,
        # n_epochs_rbm=100,  # lorenz
        # n_epochs_rbm=150,
        # n_epochs_rbm=30,
        n_epochs_rbm=RandomRegression.number_iter_rbm_loop,
        n_iter_backprop=RandomRegression.number_iter_backprop,
        # n_iter_backprop=50,
        contrastive_divergence_iter=2,
        batch_size=32,
        activation_function='relu',
        n_hidden_layers_mlp=1,
        cost_function_name='mse')
    return tmp_regressor, RandomRegression.tmp_learning_rate_rbm, RandomRegression.tmp_learning_rate
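
A hedged usage sketch, assuming create_random_model lives on the RandomRegression class that also carries the attributes it reads (x_train and y_train are prepared elsewhere):

regressor, lr_rbm, lr = RandomRegression.create_random_model()
regressor.fit(x_train, y_train)
print('sampled rates:', lr_rbm, lr)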
Example #5
def train_model(learning_rate_rbm, learning_rate, batch_size, x_train, y_train, x_test, message_queue):
    path_DBN = os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)), "models"), "deep-belief-network")
    sys.path.append(path_DBN)
    from dbn.tensorflow import SupervisedDBNRegression

    regressor_DBN = SupervisedDBNRegression(hidden_layers_structure=[100],
                                            learning_rate_rbm=learning_rate_rbm,
                                            learning_rate=learning_rate,
                                            n_epochs_rbm=20,
                                            n_iter_backprop=200,
                                            batch_size=batch_size,
                                            activation_function='sigmoid',
                                            verbose=False)
    regressor_DBN.fit(x_train, y_train)
    pred = regressor_DBN.predict(x_test)
    message_queue.put(pred)
    return
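
Because this variant reports its predictions through a queue, it can run in a separate process. A minimal driver sketch, assuming the arrays are prepared elsewhere:

from multiprocessing import Process, Queue

message_queue = Queue()
p = Process(target=train_model,
            args=(0.01, 0.01, 16, x_train, y_train, x_test, message_queue))
p.start()
pred = message_queue.get()  # blocks until the worker puts its predictions
p.join()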
Example #6
def build_RBM(num_bp, epoch_pretrain=25, batch_size=24, hidDim=[100, 140]):
    regressor = SupervisedDBNRegression(hidden_layers_structure=hidDim,
                                        learning_rate_rbm=0.01,
                                        learning_rate=0.01,
                                        n_epochs_rbm=epoch_pretrain,
                                        n_iter_backprop=num_bp,
                                        batch_size=batch_size,
                                        activation_function='tanh')
    return regressor
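
A possible call, assuming X_train/Y_train/X_test are scaled NumPy arrays:

regressor = build_RBM(num_bp=200)  # defaults: 25 RBM epochs, batch size 24, layers [100, 140]
regressor.fit(X_train, Y_train)
Y_pred = regressor.predict(X_test)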
Example #7
def gerar_dbn(topologia=[10], tx_rbm=0.1, tx_apr=0.3, n_epoc_rbm=0, n_iter=20, funcao='relu'):
    from dbn.tensorflow import SupervisedDBNRegression

    dbn = SupervisedDBNRegression(hidden_layers_structure=topologia,
                                  learning_rate_rbm=tx_rbm,
                                  learning_rate=tx_apr,
                                  n_epochs_rbm=n_epoc_rbm,
                                  n_iter_backprop=n_iter,
                                  batch_size=1,
                                  activation_function=funcao)
    return dbn
Example #8
def create_random_model():
    RandomRegressionDolar.tmp_learning_rate_rbm = 0.0005 + uniform(0, 1) * (0.05 - 0.0005)  # random
    RandomRegressionDolar.tmp_learning_rate = 0.00005 + uniform(0, 1) * (0.005 - 0.00005)  # random
    tmp_regressor = SupervisedDBNRegression(hidden_layers_structure=[RandomRegressionDolar.number_visible_input,
                                                                     RandomRegressionDolar.number_hidden_input],
                                            learning_rate_rbm=RandomRegressionDolar.tmp_learning_rate_rbm,
                                            learning_rate=RandomRegressionDolar.tmp_learning_rate,
                                            n_epochs_rbm=RandomRegressionDolar.number_iter_rbm_loop,
                                            n_iter_backprop=RandomRegressionDolar.number_iter_backprop,
                                            contrastive_divergence_iter=2,
                                            batch_size=32,
                                            activation_function='relu',
                                            n_hidden_layers_mlp=1,
                                            cost_function_name='mse')
    return tmp_regressor, RandomRegressionDolar.tmp_learning_rate_rbm, RandomRegressionDolar.tmp_learning_rate
Example #9
#print(X.shape[0])

#stdScaler = StandardScaler()
min_max_scaler = MinMaxScaler()
X = min_max_scaler.fit_transform(X)

#print('x_train number: ', X.shape[0])
#print('x_test number: ', X_test.shape[0])
#print('Y_train number: ', Y.shape[0])
#print('y_test number: ', Y_test)

regressor = SupervisedDBNRegression(hidden_layers_structure=[100],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=10,
                                    n_iter_backprop=100,
                                    batch_size=16,
                                    activation_function='relu')

regressor.fit(X, Y)

# Save the model
regressor.save('models/abalone_3.pkl')

# Restore it
#regressor = SupervisedDBNRegression.load('models/abalone_2.pkl')

# Test
data1 = pd.read_csv("abalone_test.csv")
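
The snippet ends after loading the test CSV. A hedged continuation, assuming abalone_test.csv carries the same feature columns the model was trained on (the column layout is an assumption):

X_new = min_max_scaler.transform(data1.values)  # reuse the scaler fit on the training features
restored = SupervisedDBNRegression.load('models/abalone_3.pkl')
Y_new_pred = restored.predict(X_new)
print(Y_new_pred)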
Example #10
Y_train = Y_raw.iloc[]

X_test = X_raw.iloc[]
X_train = X_raw.iloc[]



# Data scaling
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)

# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[7],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.0001,
                                    n_epochs_rbm=500,  
                                    n_iter_backprop=7409, # run more iter
                                    batch_size=40,
                                    activation_function='relu')
regressor.fit(X_train, Y_train)


# Test
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)


mse = mean_squared_error(Y_test, Y_pred)
print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(Y_test, Y_pred), mse))

def mean_absolute_percentage_error(y_test, y_pred):
    # Body not shown in the source; completed here with the standard MAPE formula.
    return np.mean(np.abs((y_test - y_pred) / y_test)) * 100
Example #11
# Data scaling
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)

LEARNING_RATE_BASE = 0.01  # initial learning rate
LEARNING_RATE_DECAY = 0.99  # decay rate of the learning rate
LEARNING_RATE_STEP = 100  # number of batches fed before the rate is updated; usually total samples / BATCH_SIZE
# Counter recording how many batches have run; starts at 0 and is marked non-trainable
global_steps = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                           global_steps,
                                           LEARNING_RATE_STEP,
                                           LEARNING_RATE_DECAY,
                                           staircase=True)
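# Note: tf.train.exponential_decay returns a tensor. If SupervisedDBNRegression
# expects a plain Python float (an assumption about the library), an equivalent
# staircase-decayed value can be computed directly:
#     learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step // LEARNING_RATE_STEP)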
print(X_train.shape)
# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[80],
                                    learning_rate_rbm=0.01,
                                    learning_rate=learning_rate,
                                    n_epochs_rbm=1,
                                    n_iter_backprop=200,
                                    batch_size=16,
                                    activation_function='relu')
regressor.fit(X_train, Y_train)

# Test
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)
print('Done.\nR-squared: %f\nMSE: %f' %
      (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred)))
Example #12
def train_model(learning_rate, periods, batch_size, feature, label,
                path_out_png):
    path_DBN = os.path.join(
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "models"),
        "deep-belief-network")
    sys.path.append(path_DBN)
    from dbn.tensorflow import SupervisedDBNRegression

    X_train, X_test, Y_train, Y_test = train_test_split(feature,
                                                        label,
                                                        test_size=0.2,
                                                        shuffle=False)

    train_steps_per_period = X_train.shape[0] // periods  # floor
    test_steps_per_period = X_test.shape[0] // periods
    # print(X_train)
    # print(Y_train)
    '''regressor_DBN = SupervisedDBNRegression(hidden_layers_structure=[100],
                                            learning_rate_rbm=0.01,
                                            learning_rate=learning_rate,
                                            n_epochs_rbm=20,
                                            n_iter_backprop=200,
                                            batch_size=batch_size,
                                            activation_function='sigmoid',
                                            verbose=False)'''
    regressor_DBN = SupervisedDBNRegression(learning_rate=learning_rate,
                                            batch_size=batch_size,
                                            verbose=False)

    print("Training model...")
    print("RMSE (on training data):")
    root_mean_squared_errors = []
    for period in range(0, periods):
        x_train = np.array(
            X_train[period * train_steps_per_period:(period + 1) *
                    train_steps_per_period])
        # x_train=x_train.reshape(x_train.size,1)
        y_train = np.array(
            Y_train[period * train_steps_per_period:(period + 1) *
                    train_steps_per_period])
        # y_train = y_train.reshape(y_train.size, 1, 1)
        # print(x_train)
        # print(y_train)
        regressor_DBN.fit(x_train, y_train)

        x_test = X_test[period * test_steps_per_period:(period + 1) *
                        test_steps_per_period]
        y_test = Y_test[period * test_steps_per_period:(period + 1) *
                        test_steps_per_period]
        predictions = regressor_DBN.predict(x_test)
        # predictions = np.array([predictions])
        # print(predictions.shape)
        # print(y_test.shape)

        root_mean_squared_error = math.sqrt(
            mean_squared_error(y_test, predictions))

        print("  period %02d : %0.2f" % (period, root_mean_squared_error))

        root_mean_squared_errors.append(root_mean_squared_error)

    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)
    plt.savefig(path_out_png)

    print("Final RMSE (on training data): %0.2f" % root_mean_squared_error)
Example #13
use_all_data = False  # assumption: referenced by the assert below but not defined in this snippet
use_CCA_data = True
use_deep = False
step = 1
train_deep = 50
train_start = 51
predict_start = 52

assert step > 0
assert train_deep >= step and train_start >= train_deep
assert predict_start > train_start
assert not (use_all_data and use_CCA_data)

regressor_DBN = SupervisedDBNRegression(hidden_layers_structure=[100],
                                        learning_rate_rbm=0.01,
                                        learning_rate=0.01,
                                        n_epochs_rbm=20,
                                        n_iter_backprop=200,
                                        batch_size=32,
                                        activation_function='sigmoid',
                                        verbose=False)
regressor_AdaBoost = AdaBoostRegressor()
regressor_DBNAdaBoost = AdaBoostRegressor(SupervisedDBNRegression(
    hidden_layers_structure=[100],
    learning_rate_rbm=0.01,
    learning_rate=0.01,
    n_epochs_rbm=20,
    n_iter_backprop=200,
    batch_size=16,
    activation_function='sigmoid',
    verbose=False),
                                          loss="square",
                                          n_estimators=250)
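
A hedged comparison sketch over the three regressors, assuming scaled train/test arrays and the usual sklearn.metrics imports are in scope:

for name, reg in [('DBN', regressor_DBN),
                  ('AdaBoost', regressor_AdaBoost),
                  ('DBN+AdaBoost', regressor_DBNAdaBoost)]:
    reg.fit(X_train, Y_train.ravel())
    Y_pred = reg.predict(X_test)
    print(name, 'MSE:', mean_squared_error(Y_test, Y_pred))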
Example #14
X = min_max_scaler.fit_transform(X)

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)

#print('X: ', X.shape[1])
#print('Y: ', Y)
#print('Y_train number: ', Y_train[0])
#print('y_test number: ', Y_test.shape[0])

#'''

# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[10, 100],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=20,
                                    n_iter_backprop=100,
                                    batch_size=16,
                                    activation_function='relu')
regressor.fit(X_train, Y_train)

# Save the model
regressor.save('models/taipei_data.pkl')

# Restore it
#regressor = SupervisedDBNRegression.load('models/model_regression.pkl')

# Test
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)
print('Done.\nR-squared: %f\nMSE: %f\nMAPE: %f' %
      (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred),
       mean_absolute_percentage_error(Y_test, Y_pred)))
Example #15
def DBN_Run(hidden_layers_struc, learnin_rate_rbm, learnin_rate,
            num_epochs_rbm, num_iter_backprop, batchSize, activation_func):
    # Start date for the model; enter it in Month/Day/Year format, e.g. 2/6/2019.
    # The program finds the next closest date by iterating forward through the calendar (no leap years).
    # Enter a date after 1/3/1950 so that Volume data is available.
    startDate = "6/26/2000"

    # defines a start time for the project job
    startTime = time.time()
    # Create the output directory if it does not already exist
    if not os.path.isdir("benchmark-output"):
        os.mkdir("benchmark-output")

    # Loading dataset
    SP_Data_Full = pd.read_csv("data-files/S&P_Movement.csv", sep=',')

    # Change start index based on the given start date and read in new dataframe
    startIndex = SP_Data_Full.index[SP_Data_Full["Date"] == startDate].values
    SP_Data = SP_Data_Full.iloc[startIndex[0]:-1]
    SP_Data.reset_index(inplace=True)

    # Shift the data set so that the model is reading in the previous day's information on the High, Low, Close, Volume
    # and Movement
    SP_Data["PrevHigh"] = SP_Data["High"].shift(-1)
    SP_Data["PrevLow"] = SP_Data["Low"].shift(-1)
    SP_Data["PrevClose"] = SP_Data["Close"].shift(-1)
    SP_Data["PrevVolume"] = SP_Data["Volume"].shift(-1)
    SP_Data["PrevMovement"] = SP_Data["Movement"].shift(-1)

    # split the new dataframe into features and targets
    target = SP_Data[["Close", "Movement"]]
    features = SP_Data[[
        "Date", "Open", "PrevHigh", "PrevLow", "PrevVolume", "PrevMovement"
    ]]

    features = format_year(features)

    # format the dataframe as a numpy array for the tensorflow functions
    target_array = target.values
    features_array = features.values
    X, Y = features_array, target_array

    # Splitting data into test and train
    X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                        Y,
                                                        test_size=0.2,
                                                        random_state=1337)

    # Data scaling
    min_max_scaler = MinMaxScaler()
    X_train = min_max_scaler.fit_transform(X_train)

    # Training
    regressor = SupervisedDBNRegression(
        hidden_layers_structure=hidden_layers_struc,
        learning_rate_rbm=learnin_rate_rbm,
        learning_rate=learnin_rate,
        n_epochs_rbm=num_epochs_rbm,
        n_iter_backprop=num_iter_backprop,
        batch_size=batchSize,
        activation_function=activation_func)
    regressor.fit(X_train, Y_train)

    trainingTime = time.time() - startTime
    # Test
    X_test = min_max_scaler.transform(X_test)
    Y_pred = regressor.predict(X_test)
    r2 = r2_score(Y_test, Y_pred)
    mse = mean_squared_error(Y_test, Y_pred)
    testingTime = time.time() - startTime - trainingTime

    totalRunTime = time.time() - startTime
    # Text output file for more user friendly reading
    # formated based on the input into this function as the hyperparameters in order delimited by the _
    file = open(
        "benchmark-output/result_" + str(hidden_layers_struc) + "_" +
        str(learnin_rate_rbm) + "_" + str(learnin_rate) + "_" +
        str(num_epochs_rbm) + "_" + str(num_iter_backprop) + "_" +
        str(batchSize) + "_" + str(activation_func) + ".txt", "w+")
    file.write('Done.\nR-squared: %f\nMSE: %f' % (r2, mse) + "\n")
    file.write("Training Time: " + str(trainingTime) + "\n")
    file.write("Testing Time: " + str(testingTime) + "\n")
    file.write("Total Run Time: " + str(totalRunTime) + "\n\n")
    file.write("Network Information:")
    file.write("Hidden Layer Structure: " + str(hidden_layers_struc) + "\n")
    file.write("Learning Rate RBM: " + str(learnin_rate_rbm) + "\n")
    file.write("Learning Rate: " + str(learnin_rate) + "\n")
    file.write("Number of Epochs: " + str(num_epochs_rbm) + "\n")
    file.write("Number of Iterative Backpropogations: " +
               str(num_iter_backprop) + "\n")
    file.write("Batch Size: " + str(batchSize) + "\n")
    file.write("Activation Function: " + str(activation_func) + "\n")
    file.close()
    # CSV file output for use in data visualization
    # hidden_layers_struc is read here as [nodes per hidden layer, number of hidden layers]
    hiddenlayerNumNodes = hidden_layers_struc[0]
    hiddenlayerNum = hidden_layers_struc[1]
    file = open(
        "benchmark-output/result_" + str(hidden_layers_struc) + "_" +
        str(learnin_rate_rbm) + "_" + str(learnin_rate) + "_" +
        str(num_epochs_rbm) + "_" + str(num_iter_backprop) + "_" +
        str(batchSize) + "_" + str(activation_func) + ".csv", "w+")
    file.write(
        "R-squared,MSE,trainingTime,testingTime,totalRunTime,hiddenlayerNumNodes,hiddenlayerNum,learnin_rate_rbm"
        ",learnin_rate,num_epochs_rbm,num_iter_backprop,batchSize,activation_func\n"
    )
    file.write(
        str(r2) + "," + str(mse) + "," + str(trainingTime) + "," +
        str(testingTime) + "," + str(totalRunTime) + "," +
        str(hiddenlayerNumNodes) + "," + str(hiddenlayerNum) + "," +
        str(learnin_rate_rbm) + "," + str(learnin_rate) + "," +
        str(num_epochs_rbm) + "," + str(num_iter_backprop) + "," +
        str(batchSize) + "," + str(activation_func))
    file.close()
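
A possible benchmark call; the CSV section above indexes hidden_layers_struc as [nodes, layer count], so a two-element list is expected (the values here are illustrative):

DBN_Run(hidden_layers_struc=[100, 2], learnin_rate_rbm=0.01, learnin_rate=0.01,
        num_epochs_rbm=20, num_iter_backprop=200, batchSize=16,
        activation_func='relu')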
Example #16
data_rs_x_2013 = pd.concat([Lst_rs_2013,Rndaily_rs_2013,Ndvi_rs_2013],axis=1).values
data_rs_x_2014 = pd.concat([Lst_rs_2014,Rndaily_rs_2014,Ndvi_rs_2014],axis=1).values
data_rs_x_2015 = pd.concat([Lst_rs_2015,Rndaily_rs_2015,Ndvi_rs_2015],axis=1).values
data_rs_x_2016 = pd.concat([Lst_rs_2016,Rndaily_rs_2016,Ndvi_rs_2016],axis=1).values
# How to use pd.concat: https://blog.csdn.net/stevenkwong/article/details/52528616

data_rs_x_2013_mm = MinMaxScaler().fit_transform(data_rs_x_2013)
data_rs_x_2014_mm = MinMaxScaler().fit_transform(data_rs_x_2014)
data_rs_x_2015_mm = MinMaxScaler().fit_transform(data_rs_x_2015)
data_rs_x_2016_mm = MinMaxScaler().fit_transform(data_rs_x_2016)

# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[100],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=20,
                                    n_iter_backprop=300,
                                    batch_size=16,
                                    activation_function='relu')
regressor.fit(x_train, y_train)

# Test
#X_test = min_max_scaler.transform(X_test)
y_pred = regressor.predict(x_test)
print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(y_test, y_pred), mean_squared_error(y_test, y_pred)))

r, p = pearsonr(y_test.flatten(), y_pred.flatten())
r2 = r2_score(y_test.flatten(), y_pred.flatten())
MAE = mean_absolute_error(y_test.flatten(), y_pred.flatten())
MSE = mean_squared_error(y_test.flatten(), y_pred.flatten())
Example #17
                                                    random_state=1337)

# Data scaling
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)

#print('x_train number: ', X_train[0])
#print('x_test number: ', X_test.shape[1])
#print('Y_train number: ', Y_train[0])
#print('y_test number: ', Y_test.shape[0])

# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[100],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=20,
                                    n_iter_backprop=200,
                                    batch_size=16,
                                    activation_function='relu')
#regressor.fit(X_train, Y_train)

# Save the model
#regressor.save('model_regression_128.pkl')

# Restore it
regressor = SupervisedDBNRegression.load('models/model_regression.pkl')

# Test
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)
print('Done.\nR-squared: %f\nMSE: %f' %
      (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred)))
Example #18
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.preprocessing import MinMaxScaler

from dbn.tensorflow import SupervisedDBNRegression


# Loading dataset
boston = load_boston()
X, Y = boston.data, boston.target

# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

# Data scaling
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)

# Training
regressor = SupervisedDBNRegression(hidden_layers_structure=[100],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=20,
                                    n_iter_backprop=200,
                                    batch_size=16,
                                    activation_function='relu')
regressor.fit(X_train, Y_train)

# Test
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)
print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred)))