Example 1
# Imports assumed by these snippets (standalone Keras API); nng (the
# project's network factory) and lr_schedule are module-level helpers,
# sketched below.
import numpy as np
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler

def generate_multi_network(X, Y):
    # Modelling
    adam = Adam()

    callbacks_list = [
        LearningRateScheduler(lr_schedule, verbose=0),
    ]
    model = nng.NN_generator(4, 50, np.shape(X)[1], np.shape(Y)[1])

    model.compile(loss='mean_squared_error', optimizer=adam)

    model.fit(X, Y, epochs=100, batch_size=128, verbose=2,
              callbacks=callbacks_list, validation_split=0.1, shuffle=True)

    return model
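Every snippet here leans on two project-level helpers the excerpts never show: the schedule lr_schedule (Example 5 defines one variant) and the network factory in the nng module. A minimal sketch of what they might look like, assuming a plain feed-forward stack; the step-decay constants and ELU activations are assumptions, not the project's actual choices:

from keras.models import Sequential
from keras.layers import Dense

def lr_schedule(epoch, rate):
    # Hypothetical step decay: start at 0.01 and halve every 20 epochs.
    return 0.01 * 0.5**(epoch // 20)

def NN_generator(n_layers, n_neurons, n_inputs, n_outputs):
    # n_layers hidden Dense layers of n_neurons units each, linear output.
    model = Sequential()
    model.add(Dense(n_neurons, activation="elu", input_shape=(n_inputs,)))
    for _ in range(n_layers - 1):
        model.add(Dense(n_neurons, activation="elu"))
    model.add(Dense(n_outputs, activation="linear"))
    return model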
Example 2
def singleNN(inputArray: np.ndarray, outputArray: np.ndarray, nLayers: int,
             nNeurons: int, modelname: str):
    # Split, then fit the scalers on the training data only.
    X_train, X_test, y_train, y_test = train_test_split(
        inputArray, outputArray, test_size=0.3, random_state=42)

    norm_features = StandardScaler()  # alternatively MinMaxScaler(feature_range=(-1, 1))
    norm_labels = StandardScaler()

    X_train_norm = norm_features.fit_transform(X_train)
    Y_train_norm = norm_labels.fit_transform(y_train)

    X_test_norm = norm_features.transform(X_test)
    Y_test_norm = norm_labels.transform(y_test)

    model = nng.NNGenerator(nLayers, nNeurons, np.shape(inputArray)[1],
                            np.shape(outputArray)[1])

    adam = Adam(lr=0.01)

    model.compile(loss='mean_squared_error', optimizer=adam)

    callbacks_list = [
        LearningRateScheduler(lr_schedule, verbose=0),
        EarlyStopping(monitor='val_loss', patience=15)
    ]

    model.fit(X_train_norm, Y_train_norm, epochs=100, batch_size=1024,
              verbose=2, callbacks=callbacks_list, validation_split=0.1)

    score = model.evaluate(X_test_norm, Y_test_norm, verbose=2)

    print(score)

    # Find the first unused model index.
    no = 0
    for i in range(1, 100):
        saveString = "Models/HestonSinglePrice/Heston_price_single_" + str(i) + ".h5"
        no = i
        if not os.path.isfile(saveString):
            break

    # Saving model
    model.save("Models/HestonSinglePrice/Heston_price_single_" + str(no) + ".h5")

    # Saving normalization parameters
    joblib.dump(norm_features, "Models/HestonSinglePrice/norm_features_price_" + str(no) + ".pkl")
    joblib.dump(norm_labels, "Models/HestonSinglePrice/norm_labels_price_" + str(no) + ".pkl")
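The scalers are persisted with joblib precisely so a reloaded model's predictions can be mapped back to the original scale. A minimal sketch of the corresponding load path; model index 1 and the input X_new are placeholders:

import joblib
from keras.models import load_model

model = load_model("Models/HestonSinglePrice/Heston_price_single_1.h5")
norm_features = joblib.load("Models/HestonSinglePrice/norm_features_price_1.pkl")
norm_labels = joblib.load("Models/HestonSinglePrice/norm_labels_price_1.pkl")

# Scale new inputs, predict in normalized space, then invert the label scaling.
prediction = norm_labels.inverse_transform(
    model.predict(norm_features.transform(X_new)))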
Example 3

# Black-Scholes training data: fixed strike, volatility and maturity.
strike = 100
sigma = 0.2
mat = 1

X, Y = dg.BSDataGenerator(strike, sigma, mat, 300000)

norm_features = StandardScaler()  # alternatively MinMaxScaler(feature_range=(-1, 1))
norm_labels = StandardScaler()

norm_x = norm_features.fit_transform(X)
norm_y = norm_labels.fit_transform(Y)

# Modelling: a small 4x5 network with one input and one output.
model = nng.NNGenerator(4, 5, 1, 1)

adam = Adam(lr=0.1)

model.compile(loss='mean_squared_error', optimizer=adam)

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose=0),
    EarlyStopping(monitor='val_loss', patience=25)
]

model.fit(norm_x, norm_y, epochs=100, batch_size=1024, verbose=0,
          callbacks=callbacks_list, validation_split=0.1)
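The labels come from dg.BSDataGenerator, a project helper that presumably samples spots and prices them with the closed-form Black-Scholes formula. For reference, that price looks as follows; the function name and the zero default rate are assumptions:

import numpy as np
from scipy.stats import norm

def bs_call_price(spot, strike, sigma, mat, r=0.0):
    # Black-Scholes European call price.
    d1 = (np.log(spot / strike) + (r + 0.5 * sigma**2) * mat) / (sigma * np.sqrt(mat))
    d2 = d1 - sigma * np.sqrt(mat)
    return spot * norm.cdf(d1) - strike * np.exp(-r * mat) * norm.cdf(d2)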
Example 4
# SABR training and test sets loaded from CSV.
train_input = np.loadtxt("./Data/train_input_sabr.csv", delimiter=",")
train_output = np.loadtxt("./Data/train_output_sabr_approx.csv", delimiter=",")
test_input = np.loadtxt("./Data/test_input_sabr.csv", delimiter=",")
test_output = np.loadtxt("./Data/test_output_sabr_approx.csv", delimiter=",")

norm_features = StandardScaler()  # alternatively MinMaxScaler(feature_range=(-1, 1))
norm_labels = StandardScaler()

norm_x = norm_features.fit_transform(train_input)
norm_y = norm_labels.fit_transform(train_output)

norm_x_test = norm_features.transform(test_input)
norm_y_test = norm_labels.transform(test_output)

model = nng.NNGenerator(4, 5, 14, 10)

adam = Adam(lr=0.1)

model.compile(loss='mean_squared_error', optimizer=adam)

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose=0),
    EarlyStopping(monitor='val_loss', patience=25)
]

# The original snippet is truncated mid-call; the remaining arguments below
# follow the pattern of the other examples.
model.fit(norm_x,
          norm_y,
          epochs=100,
          batch_size=1024,
          verbose=0,
          callbacks=callbacks_list,
          validation_split=0.1)
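The normalized test set prepared above goes unused in the excerpt; evaluating on it, as the other examples do, would look like:

# Test loss on the held-out, consistently scaled SABR set.
score = model.evaluate(norm_x_test, norm_y_test, verbose=2)
print(score)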
Example 5
def NNModelNext(data_set: list, folder: str, model_name: str, n_layers: int,
                n_neurons: int, nn_type: str, output_scaling: str,
                input_scaling: str):
    # Returns the Keras History from model.fit.
    # Learning-rate range test: one log-spaced rate per epoch, from 1e-10 to 1.
    def lr_schedule(epoch, rate):
        seq = 10**np.linspace(start=-10, stop=0, num=100)
        return seq[epoch]

    X_train = data_set[0]
    X_test = data_set[1]
    Y_train = data_set[2]
    Y_test = data_set[3]

    if input_scaling == "standardize":
        norm_features = StandardScaler()
        normal_in = True
    elif input_scaling == "normalize":
        norm_features = MinMaxScaler()
        normal_in = True
    else:
        normal_in = False

    if output_scaling == "standardize":
        norm_labels = StandardScaler()
        normal_out = True
    elif output_scaling == "normalize":
        norm_labels = MinMaxScaler()
        normal_out = True
    else:
        normal_out = False

    if normal_in:
        X_train = norm_features.fit_transform(X_train)
        X_test = norm_features.transform(X_test)

    if normal_out:
        Y_train = norm_labels.fit_transform(Y_train)
        Y_test = norm_labels.transform(Y_test)

    if nn_type == "normal":
        model = nng.NN_generator(n_layers, n_neurons,
                                 np.shape(X_train)[1],
                                 np.shape(Y_train)[1])
    elif nn_type == "tanh":
        model = nng.NN_generator_tanh(n_layers, n_neurons,
                                      np.shape(X_train)[1],
                                      np.shape(Y_train)[1])
    elif nn_type == "mix":
        model = nng.NN_generator_mix(n_layers, n_neurons,
                                     np.shape(X_train)[1],
                                     np.shape(Y_train)[1])
    else:
        model = nng.NN_generator_mix_noise(n_layers, n_neurons,
                                           np.shape(X_train)[1],
                                           np.shape(Y_train)[1])

    adam = Adam()

    model.compile(loss='mean_squared_error', optimizer=adam)

    callbacks_list = [LearningRateScheduler(lr_schedule, verbose=1)]

    start_time = time.time()
    loss_history = model.fit(X_train,
                             Y_train,
                             epochs=100,
                             batch_size=1024,
                             verbose=1,
                             callbacks=callbacks_list,
                             validation_split=0.1,
                             shuffle=True)
    stop_time = time.time()

    return loss_history
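The schedule above is a learning-rate range test rather than a decay schedule: it sweeps 100 log-spaced rates from 1e-10 to 1, one per epoch, so the returned History can be plotted against them to pick a working rate. A hedged sketch of that plot; data_set stands for the four pre-split arrays, and the folder and model names are placeholders:

import numpy as np
import matplotlib.pyplot as plt

# Swept learning rates, matching lr_schedule above.
rates = 10**np.linspace(start=-10, stop=0, num=100)

# data_set = [X_train, X_test, Y_train, Y_test] is assumed to exist.
history = NNModelNext(data_set, "TestRuns", "lr_sweep", 4, 50,
                      "normal", "standardize", "standardize")

plt.semilogx(rates, history.history["loss"])
plt.xlabel("learning rate")
plt.ylabel("training loss")
plt.show()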
Example 6
print(np.shape(output2))

# Reshape the outputs into a single column for the scaler.
output_data = np.reshape(output2, (-1, 1))

X_train, X_test, y_train, y_test = train_test_split(
    input2, output_data, test_size=0.3, random_state=42)

norm_features = StandardScaler()  # alternatively MinMaxScaler(feature_range=(-1, 1))
norm_labels = StandardScaler()

X_train_norm = norm_features.fit_transform(X_train)
Y_train_norm = norm_labels.fit_transform(y_train)

X_test_norm = norm_features.transform(X_test)
Y_test_norm = norm_labels.transform(y_test)

model = nng.NNGenerator(4, 1000, np.shape(input2)[1], np.shape(output_data)[1])

adam = Adam(lr=0.1)

model.compile(loss='mean_squared_error', optimizer=adam)

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose=0),
    EarlyStopping(monitor='val_loss', patience=15)
]

model.fit(X_train_norm, Y_train_norm, epochs=100, batch_size=1024,
          verbose=2, callbacks=callbacks_list, validation_split=0.1)
Example 7
def NNModelNext(data_set: list,
                folder: str,
                model_name: str,
                n_layers: int,
                n_neurons: int,
                nn_type: str,
                output_scaling: str,
                input_scaling: str,
                include_loss: bool = False):
    # Returns the test score, or (History, model) when include_loss is True.
    model_save = ("Models5/" + folder + "/" + model_name + "_" +
                  str(n_layers) + "_" + str(n_neurons) + ".h5")
    model_path = "Models5/" + folder + "/"

    if not os.path.exists(model_path):
        os.makedirs(model_path)

    X_train = data_set[0]
    X_test = data_set[1]
    Y_train = data_set[2]
    Y_test = data_set[3]

    if input_scaling == "standardize":
        norm_features = StandardScaler()
        normal_in = True
    elif input_scaling == "normalize":
        norm_features = MinMaxScaler()
        normal_in = True
    else:
        normal_in = False

    if output_scaling == "standardize":
        norm_labels = StandardScaler()
        normal_out = True
    elif output_scaling == "normalize":
        norm_labels = MinMaxScaler()
        normal_out = True
    else:
        normal_out = False

    if normal_in:
        X_train = norm_features.fit_transform(X_train)
        X_test = norm_features.transform(X_test)
        # Save the fitted feature scaler next to the model.
        joblib.dump(norm_features, model_path + "norm_feature.pkl")

    if normal_out:
        Y_train = norm_labels.fit_transform(Y_train)
        Y_test = norm_labels.transform(Y_test)
        joblib.dump(norm_labels, model_path + "norm_labels.pkl")

    if nn_type == "normal":
        model = nng.NN_generator(n_layers, n_neurons,
                                 np.shape(X_train)[1],
                                 np.shape(Y_train)[1])
    elif nn_type == "tanh":
        model = nng.NN_generator_tanh(n_layers, n_neurons,
                                      np.shape(X_train)[1],
                                      np.shape(Y_train)[1])
    elif nn_type == "mix":
        model = nng.NN_generator_mix(n_layers, n_neurons,
                                     np.shape(X_train)[1],
                                     np.shape(Y_train)[1])
    elif nn_type == "regularization":
        model = nng.NN_generator_regul(n_layers, n_neurons,
                                       np.shape(X_train)[1],
                                       np.shape(Y_train)[1])
    elif nn_type == "dropout":
        model = nng.NN_generator_dropout(n_layers, n_neurons,
                                         np.shape(X_train)[1],
                                         np.shape(Y_train)[1])
    else:
        model = nng.NN_generator_mix_noise(n_layers, n_neurons,
                                           np.shape(X_train)[1],
                                           np.shape(Y_train)[1])

    adam = Adam()

    model.compile(loss='mean_squared_error', optimizer=adam)

    callbacks_list = [
        LearningRateScheduler(lr_schedule, verbose=0),
        ModelCheckpoint(model_save, monitor="val_loss", save_best_only=True)
    ]

    # Batch-size heuristic: larger batches for the bigger data sets.
    if model_name.find("mat") != -1:
        n_batch_size = 1024 * 5
    elif model_name.find("single") != -1:
        n_batch_size = 1024 * 25
    elif np.shape(X_train)[0] > 140000:
        n_batch_size = 1024 * 2
    else:
        n_batch_size = 1024

    start_time = time.time()
    loss = model.fit(X_train,
                     Y_train,
                     epochs=100,
                     batch_size=n_batch_size,
                     verbose=0,
                     callbacks=callbacks_list,
                     validation_split=0.1,
                     shuffle=True)
    stop_time = time.time()

    score = model.evaluate(X_test, Y_test, verbose=2)

    # A high test loss suggests overfitting; ModelCheckpoint only kept the
    # best validation-loss weights, so also save the final model for inspection.
    if score > 0.7:
        print("overfit, saving overfit model")
        model.save(model_save)

    # Append the test score; mode "a" creates the file if it doesn't exist.
    with open(model_path + "/HestonModels.txt", "a") as output_file:
        output_file.write("\n")
        output_file.write(model_save + " has a score of: " + str(score) +
                          ", and took a total time of: " +
                          str(stop_time - start_time))

    print("Done with: ", model_save)

    if include_loss:
        return loss, model

    else:
        return score
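A hedged usage sketch for this variant, assuming Heston input/output arrays are already in memory; the array names, folder, model name, and network size are placeholders:

from sklearn.model_selection import train_test_split

# Pre-split the data into the four-element list the function expects.
X_train, X_test, Y_train, Y_test = train_test_split(
    input_array, output_array, test_size=0.3, random_state=42)
data_set = [X_train, X_test, Y_train, Y_test]

score = NNModelNext(data_set, "HestonSinglePrice", "price_single",
                    n_layers=4, n_neurons=500, nn_type="normal",
                    output_scaling="standardize", input_scaling="standardize")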
Example 8
def NNModel(input_array: np.ndarray,
            output_array: np.ndarray,
            n_layers: int,
            n_neurons: int,
            model_name: str,
            normal_out: bool = True,
            nn_type: str = "normal",
            scaler: str = "standardize") -> float:
    print("Starting: " + model_name)
    if normal_out:
        if nn_type == "normal":
            folder_name = "Heston"
        elif nn_type == "tanh":
            folder_name = "HestonTanh"
        else:
            folder_name = "HestonMix"
    else:
        if nn_type == "normal":
            folder_name = "Heston_non_normal"
        elif nn_type == "tanh":
            folder_name = "Heston_non_normal_tanh"
        else:
            folder_name = "Heston_non_normal_mix"

    if not os.path.exists("Models2/" + folder_name):
        os.makedirs("Models2/" + folder_name)

    # Find the first unused model index.
    no = 0
    for i in range(1, 100):
        saveString = ("Models2/" + folder_name + "/" + model_name + "_" +
                      str(i) + ".h5")
        no = i
        if not os.path.isfile(saveString):
            break

    model_path = ("Models2/" + folder_name + "/" + model_name + "_" +
                  str(no) + ".h5")

    X_train, X_test, Y_train, Y_test = train_test_split(input_array,
                                                        output_array,
                                                        test_size=0.3,
                                                        random_state=42)

    if scaler == "standardize":
        norm_features = StandardScaler()
    else:
        norm_features = MinMaxScaler()

    if normal_out:
        norm_labels = StandardScaler()
        Y_train = norm_labels.fit_transform(Y_train)
        Y_test = norm_labels.transform(Y_test)

    X_train_norm = norm_features.fit_transform(X_train)
    X_test_norm = norm_features.transform(X_test)

    if nn_type == "normal":
        model = nng.NN_generator(n_layers, n_neurons,
                                 np.shape(input_array)[1],
                                 np.shape(output_array)[1])
    elif nn_type == "tanh":
        model = nng.NN_generator_tanh(n_layers, n_neurons,
                                      np.shape(input_array)[1],
                                      np.shape(output_array)[1])
    else:
        model = nng.NN_generator_mix(n_layers, n_neurons,
                                     np.shape(input_array)[1],
                                     np.shape(output_array)[1])

    adam = Adam()

    model.compile(loss='mean_squared_error', optimizer=adam)

    callbacks_list = [
        LearningRateScheduler(lr_schedule, verbose=0),
        ModelCheckpoint(model_path, monitor="val_loss", save_best_only=True)
    ]

    start_time = time.time()
    model.fit(X_train_norm,
              Y_train,
              epochs=100,
              batch_size=1024,
              verbose=0,
              callbacks=callbacks_list,
              validation_split=0.1,
              shuffle=True)
    stop_time = time.time()

    score = model.evaluate(X_test_norm, Y_test, verbose=2)

    # Saving model
    model.save(model_path)

    # Saving normalization parameters
    joblib.dump(
        norm_features, "Models2/" + folder_name + "/" + model_name +
        "_norm_features_" + str(no) + ".pkl")

    if normal_out:
        joblib.dump(
            norm_labels, "Models2/" + folder_name + "/" + model_name +
            "_norm_labels_" + str(no) + ".pkl")

    # Appending test score to file
    with open("Models2/" + folder_name + "/HestonModels.txt",
              "a") as output_file:
        output_file.write("\n")
        output_file.write(model_name + " has a score of: " + str(score) +
                          ", and took a total time of: " +
                          str(stop_time - start_time))

    print("Stopping: " + model_name)
    return score