示例#1
0
def fit_model_threaded(model, data_gen_train, steps_per_epoch, configs):
    """Thread worker for model fitting - so it doesn't freeze on jupyter notebook.

    Trains the caller-supplied ``model`` on ``data_gen_train``, saves the
    trained weights to ``configs['model']['filename_model']`` and reports
    completion on stdout.

    Args:
        model: a compiled Keras-style model exposing ``fit_generator`` and
            ``save``.
        data_gen_train: generator of training batches consumed by
            ``fit_generator``.
        steps_per_epoch: number of generator batches per epoch.
        configs: config dict; reads ``configs['model']['epochs']`` and
            ``configs['model']['filename_model']``.
    """
    # Bug fix: the original rebuilt a fresh network here via module-level
    # `lstm` and `ncols`, silently discarding the `model` argument the
    # caller constructed. Train the passed-in model instead.
    model.fit_generator(data_gen_train,
                        steps_per_epoch=steps_per_epoch,
                        epochs=configs['model']['epochs'])
    model.save(configs['model']['filename_model'])
    print('> Model Trained! Weights saved in',
          configs['model']['filename_model'])
    return
示例#2
0
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'])

# Inspect the cleaned dataset to size the network input and the training run.
with h5py.File(configs['data']['filename_clean'], 'r') as hf:
    nrows = hf['x'].shape[0]  # number of sample windows in the dataset
    ncols = hf['x'].shape[2]  # number of features per timestep
    print(ncols)  # NOTE(review): debug prints — consider removing
    print(hf['x'].shape)

# Train on the first `train_test_split` fraction of the rows.
ntrain = int(configs['data']['train_test_split'] * nrows)
# NOTE(review): dividing by epochs spreads a single pass over the training
# rows across all epochs combined — confirm this is intended.
steps_per_epoch = int(
    (ntrain / configs['model']['epochs']) / configs['data']['batch_size'])
print('> Clean data has', nrows, 'data rows. Training on', ntrain, 'rows with',
      steps_per_epoch, 'steps-per-epoch')

# Build the LSTM and run the fit on a background thread so the main
# (notebook) thread stays responsive while training proceeds.
model = lstm.build_network(layers=[ncols, 150, 150, 1])
t = threading.Thread(target=fit_model_threaded,
                     args=[model, data_gen_train, steps_per_epoch, configs])
t.start()
# NOTE(review): the thread is never joined here, so the code below runs
# while training may still be in progress — confirm this is intended.

# Generator over the held-out tail of the dataset, starting after the
# training rows.
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    start_index=ntrain)

ntest = nrows - ntrain  # remaining rows form the test set
steps_test = int(ntest / configs['data']['batch_size'])
print('> Testing model on', ntest, 'data rows with', steps_test, 'steps')

predictions = model.predict_generator(generator_strip_xy(
    data_gen_test, true_values),
示例#3
0
# Generator over the cleaned dataset — presumably yields training batches;
# verify against dl.generate_clean_data.
data_gen_train = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'])

# Inspect the cleaned dataset to size the network input and the training run.
with h5py.File(configs['data']['filename_clean'], 'r') as hf:
    nrows = hf['x'].shape[0]  # number of sample windows in the dataset
    ncols = hf['x'].shape[2]  # number of features per timestep

# Train on the first `train_test_split` fraction of the rows.
ntrain = int(configs['data']['train_test_split'] * nrows)
# NOTE(review): dividing by epochs spreads a single pass over the training
# rows across all epochs combined — confirm this is intended.
steps_per_epoch = int(
    (ntrain / configs['model']['epochs']) / configs['data']['batch_size'])
print('> Clean data has', nrows, 'data rows. Training on', ntrain, 'rows with',
      steps_per_epoch, 'steps-per-epoch')

# Build the LSTM and run the fit on a background thread so the main
# (notebook) thread stays responsive while training proceeds.
model = lstm.build_network([ncols, 150, 150, 1])
t = threading.Thread(target=fit_model_threaded,
                     args=[model, data_gen_train, steps_per_epoch, configs])
t.start()
# NOTE(review): the thread is never joined here, so the code below runs
# while training may still be in progress — confirm this is intended.

# Generator over the held-out tail of the dataset, starting after the
# training rows.
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    start_index=ntrain)

ntest = nrows - ntrain  # remaining rows form the test set
steps_test = int(ntest / configs['data']['batch_size'])
print('> Testing model on', ntest, 'data rows with', steps_test, 'steps')

predictions = model.predict_generator(generator_strip_xy(
    data_gen_test, true_values),
示例#4
0
    ncols = hf['x'].shape[2]
    
# Train on the first `train_test_split` fraction of the rows.
ntrain = int(configs['data']['train_test_split'] * nrows)
# NOTE(review): dividing by epochs spreads a single pass over the training
# rows across all epochs combined — confirm this is intended.
steps_per_epoch = int((ntrain / configs['model']['epochs']) / configs['data']['batch_size'])
print('> Clean data has', nrows, 'data rows. Training on', ntrain, 'rows with', steps_per_epoch, 'steps-per-epoch')


# Generator over the held-out tail of the dataset (rows after the training
# split), used during the build/train call below.
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    start_index=ntrain
)



# In this variant build_network appears to train the network as well (it is
# given both generators and returns a fit history alongside the model) —
# verify against lstm.build_network.
model, history = lstm.build_network([ncols, 128, 256, 1], data_gen_train, data_gen_test, steps_per_epoch, configs)

# Report the mean of the per-epoch training MSE values, scaled by 100.
# (Replaces a hand-rolled accumulate-then-divide loop with sum()/len();
# summation order is unchanged, so the float result is identical.)
mse_per_epoch = history.history['mean_squared_error']
MSE = (sum(mse_per_epoch) / len(mse_per_epoch)) * 100

print(f"> Model MSE : {MSE}")

# Re-create the test generator from the start of the held-out rows —
# presumably because the earlier one was consumed during training;
# NOTE(review): confirm downstream code relies on this fresh generator.
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    start_index=ntrain
)