Example #1
# Imports this snippet needs (exact import paths may differ in the original project)
import os
import numpy as np
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, EarlyStopping
# nng (the project's network-generator module) and lr_schedule (the learning-rate
# schedule passed to the callback below) come from the surrounding project and are
# not part of this excerpt.

def singleNN(inputArray : np.ndarray, outputArray : np.ndarray, nLayers : int, nNeurons : int, modelname : str):
    X_train, X_test, y_train, y_test = train_test_split(inputArray, outputArray, test_size=0.3, random_state=42)

    norm_features = StandardScaler() #MinMaxScaler(feature_range = (-1, 1))
    norm_labels = StandardScaler()

    X_train_norm = norm_features.fit_transform(X_train)
    Y_train_norm = norm_labels.fit_transform(y_train)

    X_test_norm = norm_features.transform(X_test)
    Y_test_norm = norm_labels.transform(y_test)

    model = nng.NNGenerator(nLayers, nNeurons, np.shape(inputArray)[1], np.shape(outputArray)[1])

    adam = Adam(lr = 0.01)

    model.compile(
        loss = 'mean_squared_error', #mean squared error
        optimizer = adam
        )

    callbacks_list = [
        LearningRateScheduler(lr_schedule, verbose = 0),
        EarlyStopping(monitor='val_loss', patience=15)
    ]

    model.fit(X_train_norm, Y_train_norm, epochs=100, batch_size=1024, verbose = 2, callbacks = callbacks_list, validation_split = 0.1)

    score=model.evaluate(X_test_norm, Y_test_norm, verbose=2)

    print(score)

    # Find the first unused model index so existing saved models are not overwritten
    no = 0
    for i in range(1, 100):
        saveString = "Models/HestonSinglePrice/Heston_price_single_"+str(i)+".h5"
        no = i
        if not os.path.isfile(saveString):
            break

    # Saving model
    model.save("Models/HestonSinglePrice/Heston_price_single_"+str(no)+".h5")

    # Saving normalization parameters
    joblib.dump(norm_features, "Models/HestonSinglePrice/norm_features_price_"+str(no)+".pkl")
    joblib.dump(norm_labels, "Models/HestonSinglePrice/norm_labels_price_"+str(no)+".pkl")
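
Every example on this page passes an lr_schedule callable to Keras' LearningRateScheduler, but the function itself is not part of these excerpts. A minimal sketch, assuming a simple step decay (the starting rate, decay factor and step length are illustrative, not the project's actual values):

def lr_schedule(epoch):
    # Step decay: start at 1e-2 and halve the learning rate every 20 epochs.
    return 0.01 * (0.5 ** (epoch // 20))

LearningRateScheduler calls the schedule once per epoch and applies whatever value it returns, so any function of the epoch index works here.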
Example #2
#        return a   (tail of a helper function cut off at the start of this excerpt)

strike = 100
sigma = 0.2 
mat = 1

X, Y = dg.BSDataGenerator(strike, sigma, mat, 300000) # project data generator (not shown): 300,000 Black-Scholes samples for the parameters above

norm_features = StandardScaler() #MinMaxScaler(feature_range = (-1, 1))
norm_labels = StandardScaler()

norm_x = norm_features.fit_transform(X)
norm_y = norm_labels.fit_transform(Y)

# Modelling
model = nng.NNGenerator(4, 5, 1, 1)

adam = Adam(lr = 0.1)

model.compile(
    loss = 'mean_squared_error', #mean squared error
    optimizer = adam
    )

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose = 0),
    EarlyStopping(monitor='val_loss', patience=25)
]

model.fit(norm_x, norm_y, epochs=100, batch_size=1024, verbose = 0, callbacks = callbacks_list, validation_split = 0.1)
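
Since both the inputs and the labels are standardized before training, the network's predictions come back on the normalized scale. A short usage sketch (the variable names below are new, and it assumes the single input column is the underlying spot):

import numpy as np

spot = np.array([[100.0]])                              # one sample, one feature
spot_norm = norm_features.transform(spot)               # reuse the fitted feature scaler
pred_norm = model.predict(spot_norm)                    # prediction on the normalized scale
pred_price = norm_labels.inverse_transform(pred_norm)   # map back to price units
print(pred_price)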
Example #3
train_input = np.loadtxt("./Data/train_input_sabr.csv", delimiter=",")
train_output = np.loadtxt("./Data/train_output_sabr_approx.csv", delimiter=",")
test_input = np.loadtxt("./Data/test_input_sabr.csv", delimiter=",")
test_output = np.loadtxt("./Data/test_output_sabr_approx.csv", delimiter=",")

norm_features = StandardScaler()  #MinMaxScaler(feature_range = (-1, 1))
norm_labels = StandardScaler()

norm_x = norm_features.fit_transform(train_input)
norm_y = norm_labels.fit_transform(train_output)

norm_x_test = norm_features.transform(test_input)
norm_y_test = norm_labels.transform(test_output)

model = nng.NNGenerator(4, 5, 14, 10)

adam = Adam(lr=0.1)

model.compile(
    loss='mean_squared_error',  #mean squared error
    optimizer=adam)

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose=0),
    EarlyStopping(monitor='val_loss', patience=25)
]

model.fit(norm_x,
          norm_y,
          epochs=100,
          # the original excerpt is cut off after epochs=100; the remaining arguments
          # are assumed to mirror the other examples on this page
          batch_size=1024,
          verbose=0,
          callbacks=callbacks_list,
          validation_split=0.1)
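
The excerpt stops here, before the held-out SABR test set loaded above is used. A plausible continuation, using only standard Keras and scikit-learn calls on the variables already defined (the print text is illustrative):

test_loss = model.evaluate(norm_x_test, norm_y_test, verbose=0)
print("Test MSE on the normalized scale:", test_loss)

preds_norm = model.predict(norm_x_test)
preds = norm_labels.inverse_transform(preds_norm)   # back to the original output scale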
Example #4
print(np.shape(output2))

output_data = np.reshape(output2, (-1, 1)) # make the labels a single column so the scaler accepts them

X_train, X_test, y_train, y_test = train_test_split(input2, output_data, test_size=0.3, random_state=42)

norm_features = StandardScaler() #MinMaxScaler(feature_range = (-1, 1))
norm_labels = StandardScaler()

X_train_norm = norm_features.fit_transform(X_train)
Y_train_norm = norm_labels.fit_transform(y_train)

X_test_norm = norm_features.transform(X_test)
Y_test_norm = norm_labels.transform(y_test)

model = nng.NNGenerator(4, 1000, np.shape(input2)[1], np.shape(output_data)[1])

adam = Adam(lr = 0.1)

model.compile(
    loss = 'mean_squared_error', #mean squared error
    optimizer = adam
    )

callbacks_list = [
    LearningRateScheduler(lr_schedule, verbose = 0),
    EarlyStopping(monitor='val_loss', patience=15)
]

model.fit(X_train_norm, Y_train_norm, epochs=100, batch_size=1024, verbose = 2, callbacks = callbacks_list, validation_split = 0.1)
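
All four examples rely on the project's nng.NNGenerator(n_layers, n_neurons, n_inputs, n_outputs) helper, which is not shown on this page. The sketch below is only a guess at what such a generator typically does (a plain fully connected Keras network; the layer type and activations are assumptions, not the project's actual code):

from keras.models import Sequential
from keras.layers import Dense

def nn_generator(n_layers, n_neurons, n_inputs, n_outputs):
    # Stack n_layers hidden Dense layers of n_neurons each, with a linear output layer.
    model = Sequential()
    model.add(Dense(n_neurons, activation="softplus", input_shape=(n_inputs,)))
    for _ in range(n_layers - 1):
        model.add(Dense(n_neurons, activation="softplus"))
    model.add(Dense(n_outputs, activation="linear"))
    return model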