Example #1
class Model:  # enclosing class statement not shown in the source; the name is a guess
    def __init__(self, configs):
        # Model settings come from the 'model' section of configs.json
        self.method = configs['model']['method']
        self.model_path = configs['model']['filename_model']
        # Data/ETL settings come from the 'data' section
        self.raw_path = configs['data']['filename']
        self.clean_path = configs['data']['filename_clean']
        self.batch_size = configs['data']['batch_size']
        self.x_window_size = configs['data']['x_window_size']
        self.y_window_size = configs['data']['y_window_size']
        self.y_lag = configs['data']['y_lag']
        self.filter_cols = configs['data']['filter_columns']
        self.train_test_split = configs['data']['train_test_split']
        self.model = None  # built or loaded later
        self.dl = etl.ETL(self.method)  # data-loading helper from the etl module
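
For reference, this is the configs.json shape implied by the lookups above. The key names come straight from the code; every value shown is a placeholder, not taken from the source. The script below additionally reads data.x_base_column, data.y_predict_column and rule.target_percent_change, included here as well:

{
    "model": {
        "method": "lstm",
        "filename_model": "model/model.h5"
    },
    "data": {
        "filename": "data/raw.csv",
        "filename_clean": "data/clean.h5",
        "batch_size": 100,
        "x_window_size": 50,
        "y_window_size": 1,
        "y_lag": 1,
        "filter_columns": [],
        "train_test_split": 0.8,
        "x_base_column": 0,
        "y_predict_column": 0
    },
    "rule": {
        "target_percent_change": 0.01
    }
}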
import h5py
import json
import numpy as np
import tensorflow as tf

import etl, lstm, plot, stats

tf.set_random_seed(777)  # for reproducibility (TensorFlow 1.x API; TF 2 uses tf.random.set_seed)
configs = json.loads(open('configs.json').read())

dl = etl.ETL(
    filename_in=configs['data']['filename'],
    filename_out=configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    x_window_size=configs['data']['x_window_size'],
    y_window_size=configs['data']['y_window_size'],
    x_col=configs['data']['x_base_column'],
    y_col=configs['data']['y_predict_column'],
    filter_cols=configs['data']['filter_columns'],
    target_percent_change=configs['rule']['target_percent_change'],
    train_test_split=configs['data']['train_test_split']
)

# dl.create_clean_datafile()  # uncomment on a first run to build the cleaned datafile read below

with h5py.File(configs['data']['filename_clean'], 'r') as hf:
    nrows = hf['x'].shape[0]
    ncols = hf['x'].shape[2]

    print(hf['x'].shape)
    print(hf['y'].shape)
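
The reads above show the layout of the cleaned datafile: two HDF5 datasets, 'x' of shape (nrows, window, ncols) and 'y' with a matching first dimension. Below is a minimal sketch of writing a file with that layout using h5py; the dimensions and filename are made up, and this is not the source's ETL implementation:

import h5py
import numpy as np

# Illustrative dimensions only
nrows, x_window_size, ncols = 1000, 50, 5
x = np.random.rand(nrows, x_window_size, ncols)
y = np.random.rand(nrows, 1)

with h5py.File('clean_example.h5', 'w') as hf:
    hf.create_dataset('x', data=x)
    hf.create_dataset('y', data=y)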
Example #3
import gru
import etl
import h5py
import json
configs = json.loads(open('configs.json').read())
model = gru.load_network("model/model_saved_2016_20180614.h5")
dl = etl.ETL("Integer")
dl.usage = "test"

true_values = []


def generator_strip_xy(data_gen_test, true_values):
    for x, y in data_gen_test:
        true_values += list(y)
        yield x
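
Keras' predict_generator expects batches of inputs only, while the data generator yields (x, y) pairs; generator_strip_xy adapts one to the other and appends the discarded targets to true_values as a side effect, so predictions can be compared against them afterwards. A toy demonstration with an in-memory stand-in for the data generator (names and shapes here are illustrative):

import numpy as np

def toy_data_gen():
    # stands in for dl.generate_clean_data(...)
    for _ in range(3):
        yield np.zeros((2, 4)), np.ones(2)

collected = []
for x_batch in generator_strip_xy(toy_data_gen(), collected):
    pass  # predict_generator would consume these x-only batches
print(len(collected))  # 6 target values captured as a side effect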


with h5py.File(configs['data']['filename_clean'], 'r') as hf:
    nrows = hf['x'].shape[0]
    ncols = hf['x'].shape[2]
ntest = nrows
steps_test = int(ntest / configs['data']['batch_size'])
tesize = steps_test * configs['data']['batch_size']
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    size=tesize,
    batch_size=configs['data']['batch_size'])
print('> Testing model on', ntest, 'data rows with', steps_test, 'steps')
predictions = model.predict_generator(
    generator_strip_xy(data_gen_test, true_values),
    steps=steps_test)
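
Note the batch arithmetic above: steps_test truncates to whole batches, and tesize = steps_test * batch_size is the number of rows actually consumed, so the generator and predict_generator agree on how much data flows. With, say, 10000 rows and a batch size of 32, steps_test is 312 and tesize is 9984; the final 16 rows are simply dropped.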
Example #4
# Excerpted mid-file: relies on h5py, etl, lstm and matplotlib.pyplot (as plt)
# being imported earlier, plus generator_strip_xy and true_values from Example #3.
def plot_results_multiple(predicted_data, true_data, prediction_len):  # signature restored; omitted in the source
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
        plt.legend()
    plt.show()
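
The list of None values shifts each predicted window to its true position on the time axis: matplotlib treats None as missing data and leaves a gap instead of drawing it. A two-line illustration:

import matplotlib.pyplot as plt

plt.plot([None, None, None, 1.0, 1.1, 1.2])  # curve starts at x = 3
plt.show()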


with h5py.File(configs['data']['filename_clean'], 'r') as hf:
    nrows = hf['x'].shape[0]
    ncols = hf['x'].shape[2]

ntrain = int(configs['data']['train_test_split'] * nrows)
ntest = nrows - ntrain
steps_test = int(ntest / configs['data']['batch_size'])

dl = etl.ETL()

model = lstm.load_network(configs['model']['filename_model'])

# start_index=ntrain starts the generator at the train/test boundary,
# so only held-out rows are streamed to the model
data_gen_test = dl.generate_clean_data(
    configs['data']['filename_clean'],
    batch_size=configs['data']['batch_size'],
    start_index=ntrain)

predictions = model.predict_generator(
    generator_strip_xy(data_gen_test, true_values),
    steps=steps_test)

plot_results(predictions[:500], true_values[:500])

# Reload the data generator