Example #1
def train_network(configs, dataloader):

    # build model
    model = Model(configs['data']['input_mode'],
                  configs['data']['output_mode'])
    model.build_model(configs['model'])

    # in-memory training
    out_seq_len = (configs['data']['input_sequence_length']
                   if configs['data']['output_mode'] == "many_to_many" else 1)
    x_train, y_train = dataloader.get_train_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    x_test, y_test = dataloader.get_test_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    history = model.train(
        x_train,
        y_train,
        x_test,
        y_test,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        shuffle=configs['model']['shuffle_training_data'],
        allow_early_stop=configs['training']['allow_early_stop'],
    )

    return model, history
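
Note: a hedged sketch of how train_network might be driven. The config keys mirror the lookups above; the DataLoader construction is a placeholder, since its real constructor is not shown.

import json

# hypothetical driver for train_network above
with open('config.json') as f:
    configs = json.load(f)

dataloader = DataLoader()  # placeholder: real constructor arguments not shown
model, history = train_network(configs, dataloader)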
Example #2
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('../data', configs['data']['filename']),
                      os.path.join('../data', configs['data']['VIMfile']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)

    '''
    # Out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
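    # e.g. with len_train = 4000, sequence_length = 50 and batch_size = 32 this is
    # steps_per_epoch = ceil((4000 - 50) / 32) = ceil(3950 / 32) = 124 batches per epoch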
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test, p0_vec = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    pred = predictions.reshape((predictions.size, 1))

    #plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    #plot_results(pred, y_test) #normalised predictions

    # De-normalise & plot
    p_pred, p_true = denorm_transform(p0_vec, pred, y_test)
    plot_results(p_pred, p_true)  # de-normalised, i.e. in the original data units

    # Compute evaluation metrics
    assess = EvalMetrics(p_true, p_pred)
    MAE = assess.get_MAE()
    RMSE = assess.get_RMSE()
    print("MAE on validation set is: %f" % MAE)
    print("RMSE on validation set is: %f" % RMSE)
Example #3
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataProcessor(os.path.join('data', configs['data']['filename']),
                         configs['data']['train_test_split'],
                         configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=".")

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions_pointbypoint = model.predict_point_by_point(x_test)
    plot_results(predictions_pointbypoint, y_test)

    predictions_fullseq = model.predict_sequence_full(
        x_test, configs['data']['sequence_length'])
    plot_results(predictions_fullseq, y_test)
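
Note: the two prediction modes above differ in how much model output is fed back in. A hedged sketch of what such methods typically look like in this family of examples (the actual Model methods are not shown here):

import numpy as np

def predict_point_by_point(model, data):
    # one independent prediction per test window
    predicted = model.predict(data)
    return np.reshape(predicted, (predicted.size,))

def predict_sequence_full(model, data, window_size):
    # feed each prediction back into the sliding window; errors compound over time
    curr_frame = data[0]
    predicted = []
    for _ in range(len(data)):
        predicted.append(model.predict(curr_frame[np.newaxis, :, :])[0, 0])
        curr_frame = curr_frame[1:]
        curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
    return predicted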
Example #4
def main(train_after=False):
    config_file = 'web_flask/LSTM/config.json'
    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(configs['data']['filename'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    model = Model()
    if not train_after:
        model.build_model(configs)
    else:
        model.load_model(os.path.join(configs['model']['save_dir'],
                                      configs['model']['model_name']))
    history = LossHistory()

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'],
                history=history,
                x_test=x_test,
                y_test=y_test)
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    '''

    history.loss_plot('epoch')
    #loss, accuracy = model.model.evaluate(x_test, y_test)
    #print(loss,accuracy)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x[0])  # note: predicts on the first training window, not x_test

    #plot_results_multiple(predictions, y, configs['data']['sequence_length'])
    plot_results(predictions, y)
Example #5
def main():
    configs = json.load(open("config.json", "r"))
    if not os.path.exists(configs["model"]["save_dir"]):
        os.makedirs(configs["model"]["save_dir"])

    data = DataLoader(
        os.path.join("data", configs["data"]["filename"]),
        configs["data"]["train_test_split"],
        configs["data"]["columns"],
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs["data"]["sequence_length"],
        normalise=configs["data"]["normalise"],
    )

    """
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	"""
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs["data"]["sequence_length"])
        / configs["training"]["batch_size"]
    )
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs["data"]["sequence_length"],
            batch_size=configs["training"]["batch_size"],
            normalise=configs["data"]["normalise"],
        ),
        epochs=configs["training"]["epochs"],
        batch_size=configs["training"]["batch_size"],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs["model"]["save_dir"],
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs["data"]["sequence_length"],
        normalise=configs["data"]["normalise"],
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs["data"]["sequence_length"])
    plot_results(predictions, y_test)
Example #6
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    if not os.path.exists(configs['model']['log_dir']):
        os.makedirs(configs['model']['log_dir'])

    data_loader = DataLoader(os.path.join('data',
                                          configs['data']['filename_train']),
                             configs['data']['train_test_split'],
                             configs['data']['columns'],
                             is_training=True)

    model = Model()
    model.build_model(configs)
    steps_per_epoch = math.ceil(
        (data_loader.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    validation_steps = math.ceil(
        (data_loader.len_val - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(train_loader=data_loader.batch_generator(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise'],
        generator_type='train'),
                          val_loader=data_loader.batch_generator(
                              seq_len=configs['data']['sequence_length'],
                              batch_size=configs['training']['batch_size'],
                              normalise=configs['data']['normalise'],
                              generator_type='val'),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          validation_steps=validation_steps,
                          save_dir=configs['model']['save_dir'],
                          log_dir=configs['model']['log_dir'])

    test_data_loader = DataLoader(
        os.path.join('data', configs['data']['filename_test']),
        0,
        configs['data']['columns'],
        is_training=False)
    x_test, y_test = test_data_loader.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    # predictions = model.predict_sequences_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot = Plot()
    plot.plot_results_multiple(predictions, y_test,
                               configs['data']['sequence_length'])
Example #7
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    if not configs['training']['train']:
        model.load_model(filepath='saved_models/02102019-164727-e2.h5')
    else:
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
    # out-of-memory generative training
    # steps_per_epoch = math.ceil(
    #     (data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'],
    #                                                configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
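
Note: generate_train_batch lives in the DataLoader and is not shown in these examples. A hedged sketch of the usual shape of such a generator: it yields (x, y) window batches forever, which is why steps_per_epoch has to bound each epoch (the _next_window helper is an assumption):

import numpy as np

def generate_train_batch(self, seq_len, batch_size, normalise):
    # illustrative only: endless batches of sliding windows over the train split
    i = 0
    while True:
        x_batch, y_batch = [], []
        for _ in range(batch_size):
            if i >= (self.len_train - seq_len):
                i = 0  # wrap around at the end of the training data
            x, y = self._next_window(i, seq_len, normalise)  # assumed helper
            x_batch.append(x)
            y_batch.append(y)
            i += 1
        yield np.array(x_batch), np.array(y_batch)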
Example #8
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    # fetch the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # build the model
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    # number of forward/backward-pass steps per epoch
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)


#if __name__=='__main__':
#	main()
Example #9
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    rates_count = (configs['data']['sequence_length'] *
                   configs['data']['number_sequences'])
    #os.path.join('data', configs['data']['filename']),
    data = DataLoader(configs['data']['symbol'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'], rates_count)

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)

    input("Press Enter to continue...")
Example #10
def main(model_name=None):
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    if model_name:
        saved_model = os.path.join(configs['model']['save_dir'], model_name)
        model.load_model(saved_model)
    else:
        model.build_model(configs)

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
    '''

    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    if not model_name:
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Example #11
def main_sin():
    config = json.load(open("config_sin.json", 'r'))
    data = DataLoader(os.path.join('data', config['data']['filename']),
                      config['data']['train_test_split'],
                      config['data']['columns'])
    x_train, y_train = data.get_train_data(config['data']['sequence_length'],
                                           config['data']['normalise'])
    x_test, y_test = data.get_test_data()
    model = Model()
    model.build_model(config)
    model.train(x_train, y_train, config['training']['epochs'],
                config['training']['batch_size'])
Example #12
def main():
    configs = json.load(open('config.json', 'r'))

    # create a folder to save the model params
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    #plot true data
    #plot_results(data.data_train,True)

    #train model
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )


    predictions = model.predict_point_by_point(x_test)
    # plot_results(predictions, y_test)
    # print(predictions)

    data1 = pd.DataFrame(predictions)
    data1.to_csv('predict.csv')
    data2 = pd.DataFrame(y_test)
    data2.to_csv('true.csv')
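
Note: a follow-up sketch (not part of the original example) that reads the two CSV files back and scores them, assuming each holds a single prediction/target column:

import pandas as pd

pred = pd.read_csv('predict.csv', index_col=0).iloc[:, 0]
true = pd.read_csv('true.csv', index_col=0).iloc[:, 0]
mae = (pred - true).abs().mean()
rmse = (((pred - true) ** 2).mean()) ** 0.5
print("MAE: %f, RMSE: %f" % (mae, rmse))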
Example #13
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    # To load a previously saved model instead of training, uncomment the next line and skip model.train() below
    # model.load_model(r'saved_models/15102019-155115-e2.h5')

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    # print('shape of x: {0}'.format(x.shape))  # (3942, 49, 2)
    # print('shape of y: {0}'.format(y.shape))  # (3942, 1)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )
    '''

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #14
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
Example #15
def main():
    configs = json.load(open('config.json', 'r'))

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    # math.ceil(total number of sliding windows / batch_size)
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #16
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training

    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'],
                          configs=configs)

    x_test, y_test, p0 = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    y_test = np.reshape(np.copy(y_test), -1)

    plot_results((p0 * (predictions + 1))[-200:], (p0 * (y_test + 1))[-200:])
    measure_performance(predictions, y_test)
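
Note: measure_performance is not shown; a minimal sketch consistent with how it is called here (normalised predictions against normalised targets) might be:

import numpy as np

def measure_performance(predictions, y_true):
    # hypothetical helper; the original implementation is not shown
    err = np.asarray(predictions) - np.asarray(y_true)
    print("MAE:  %f" % np.mean(np.abs(err)))
    print("RMSE: %f" % np.sqrt(np.mean(err ** 2)))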
Example #17
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test, onedot = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(onedot)
    with open('output.txt', 'w') as f:
        next_usage = int((predictions[-1] + 1) *
                         data.last_raw_data(seq_len=configs['data']['sequence_length']))
        f.write('Predicted bolt/nut consumption for the next period: ' + str(next_usage))
Example #18
def train(Model):
    """Train the model"""

    train_set, target, categoricals = utils.load_data(args.dataset,
                                                      args.file_name)
    dataset_train = Dataset(dataset=train_set,
                            categorical_indices=categoricals)
    target = dataset_train[target]
    dataset_train.drop(target, axis=1, inplace=True)
    (
        categorical_variables,
        non_categorical_variables,
    ) = dataset_train.get_variables_names()

    print("Creating cross products dataset")
    cross_products = dataset_train.cross_categorical_dataset()

    model = Model(categorical_variables, non_categorical_variables)
    model = model.build_model(cross_products)

    print("Training model")
    model.fit(
        [
            [
                train_set[categorical_variables],
                train_set[non_categorical_variables]
            ],
            cross_products,
        ],
        target,
        epochs=config.EPOCHS,
        validation_split=config.VALIDATION_SPLIT,
    )
Example #19
def main():
    configs = json.load(open(CONFIG, 'r'))

    data = DataLoader(DATA, configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          model_path=MODEL)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)
    sys.stdout.write("--END--")
Example #20
# note: this variant assumes `configs` was loaded at module scope
def main(choice):
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    model = Model()
    model.build_model(configs)
    if (choice != 'info'):
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'])

        # out-of-memory generative training
        # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        # model.train_generator(
        #     data_gen = data.generate_train_batch(
        #         seq_len = configs['data']['sequence_length'],
        #         batch_size = configs['training']['batch_size'],
        #         normalise = configs['data']['normalise']
        #     ),
        #     epochs = configs['training']['epochs'],
        #     batch_size = configs['training']['batch_size'],
        #     steps_per_epoch = steps_per_epoch
        # )

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])

        if (choice == "multi"):
            predictions = model.predict_sequences_multiple(
                x_test, configs['data']['sequence_length'],
                configs['data']['sequence_length'])
            plot_results_multiple(predictions, y_test,
                                  configs['data']['sequence_length'])
        elif (choice == "seq"):
            predictions = model.predict_sequence_full(
                x_test, configs['data']['sequence_length'])
            plot_results(predictions, y_test)
        else:
            predictions = model.predict_point_by_point(x_test)
            plot_results(predictions, y_test)
Example #21
def main():
    # load parameters
    configs = json.load(open('./data/config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    # create the RNN model
    model = Model()
    model.build_model(configs)

    # load the training data
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(
        x,
        y,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        save_dir=configs['model']['save_dir']
    )

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # results visualization
    predictions_multiseq = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    predictions_pointbypoint = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions_multiseq, y_test,
                          configs['data']['sequence_length'])
    plot_results(predictions_pointbypoint, y_test)
Example #22
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
    )

    model = Model()
    model.build_model(configs)

    # get train data
    x, y = data.get_train_data()

    #x=x.squeeze()
    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    #    # out-of-memory generative training
    #    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    #    model.train_generator(
    #        data_gen=data.generate_train_batch(
    #            batch_size=configs['training']['batch_size'],
    #        ),
    #        epochs=configs['training']['epochs'],
    #        batch_size=configs['training']['batch_size'],
    #        steps_per_epoch=steps_per_epoch,
    #        save_dir=configs['model']['save_dir']
    #    )

    # testing model
    x_test, y_test = data.get_test_data()
    #x_test=x_test.squeeze()

    predictions = model.predict_point_by_point(x_test)

    #   plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #23
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training (heavier computation)
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    #predictions are made point by point with model.predict_point_by_point
    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
Example #24
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    dataframe = pd.read_csv(configs['data']['filename'])
    f = open('/Users/yucheng/Downloads/project2/stockIDs.txt', 'r')
    stockIDs = [int(line.split('\n')[0]) for line in f.readlines()]
    for stock_id in stockIDs[377:378]:  # renamed to avoid shadowing the built-in id()
        # for stock_id in stockIDs[444:500]:
        print("index: ", stockIDs.index(stock_id))
        data = DataLoader(dataframe, stock_id, configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])
        '''
    	# in-memory training
    	model.train(
    		x,
    		y,
    		epochs = configs['training']['epochs'],
    		batch_size = configs['training']['batch_size'],
    		save_dir = configs['model']['save_dir']
    	)
    	'''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(id=stock_id,
                              data_gen=data.generate_train_batch(
                                  seq_len=configs['data']['sequence_length'],
                                  batch_size=configs['training']['batch_size'],
                                  normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])
Example #25
def main():
    model_config = json.load(open("config.json", "r"))["model"]
    dataset_path = 'data/'
    # Requirement: Check the presence of the dataset
    if check_dataset(dataset_path):
        configs = json.load(open('config.json', 'r'))
        # 1) Build the model
        model = Model(model_config=model_config)
        model.build_model(configs)
        batch_size, epochs = 4, 30
        cols = configs['training']['cols']
        sequence_length = configs['data']['sequence_length']
        save_dir = "model"
        file_idx = 0
        dataset_path = glob.glob("{}/*.txt".format(dataset_path))
        # 2) Loop over the files in the dataset folder
        for filename in dataset_path:
            print("Training {}/{} - {}".format(file_idx, len(dataset_path), filename))
            file_idx += 1
            # 3) Divide the dataset in parts and loop over them
            chunksize = 10**4
            for chunk in pd.read_csv(filename, chunksize=chunksize):
                # 4) Get and prepare data
                data = DataModel()
                x = data.get_train_data(
                    data=chunk.get(cols).values.tolist(),
                    seq_len=sequence_length)
                X_train, X_test, y_train, y_test = train_test_split(
                    data.dataX, data.dataY, test_size=0.33)
                print(y_train.shape)
                # 5) Train the model
                model.train(X_train,
                            X_test,
                            y_train,
                            y_test,
                            epochs=epochs,
                            batch_size=batch_size,
                            save_dir=save_dir)
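
Note: sklearn's train_test_split shuffles by default, which leaks future windows into the training set of a time series. If chronological order matters, request a plain chronological split instead:

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    data.dataX, data.dataY, test_size=0.33, shuffle=False)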
Example #26
def main():
    configs = json.load(open('config.json', 'r'))

    # load the data (left as a placeholder in the original; see the sketch after this example)
    X_train, y_train, X_test, y_test = None, None, None, None

    # build the model
    # use either load_model or build_model, not both
    model = Model()
    # model.load_model(filepath='')
    model.build_model(configs)

    # train the model
    model.train_model(x=X_train,
                      y=y_train,
                      epochs=configs['training']['epochs'],
                      batch_size=configs['training']['batch_size'],
                      save_dir=configs['model']['save_dir'])

    # test the model
    prediction = model.test_model(x=X_test)

    print "Test data true label is : %s" % y_test
    print "Model output is : %s" % prediction
Example #27
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    my_model = model.build_model(configs)

    plot_model(my_model, to_file='output/model.png', show_shapes=True)
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x, y = data.get_train_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    print(x.shape)
    print(y.shape)

    print(configs['training']['batch_size'])
    print(configs['model']['save_dir'])
    model.train(x,
                y,
                configs['training']['epochs'],
                configs['training']['batch_size'],
                configs['model']['save_dir']
                )

    x_test, y_test = data.get_test_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiplt(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequences_full(x_test, configs['data']['sequence_length'])
    prediction_point = model.predict_point_by_point(x_test)

    # print(prediction_point)
    # print(np.array(predictions).shape)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(prediction_point, y_test)
Example #28
def main():
    # read the required parameters
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    # load the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # create the RNN model
    model = Model()
    mymodel = model.build_model(configs)

    plot_model(mymodel, to_file='model.png', show_shapes=True)

    # load the training data
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # visualise the test results
    predictions = model.predict_sequences_multiple(
        x_test,
        configs['data']['sequence_length'],
        configs['data']['sequence_length'],
        debug=False)
    print(np.array(predictions).shape)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Example #29
def main():
    configs = json.load(open('configcrops.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # Yogyakarta: Kulon progo, bantul, gunung kidul, sleman, DIY
    # Jawa Barat: Bandung, Tasikmalaya, Majalengka, Cirebon, Kuningan, Garut, Sumedang, Cianjut, Subang, Purwakarta, Indramayu
    # Ciamis, Sukabumi, Bogor, Bekasi, Karawang

    # # out-of-memory generative training
    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    # # save_dir = configs['model']['save_dir']

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # print(x_test)
    # print(y_test)

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])

    predictions_point = model.predict_point_by_point(x_test)
    print(len(predictions_point))
    plot_results(predictions_point, y_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # predictions_full = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # plot_results(predictions_full, y_test)

    groundtrue = data._groundtruths(1).ravel()
    print(len(groundtrue))

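    # manual RMSE below ('hasil kuadrat' is Indonesian for squared result);
    # note the (n - 2) denominator, in the style of a residual standard error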
    RMSElist = []
    for i in range(len(groundtrue)):
        errorrate = groundtrue[i] - predictions_point[i]
        hasilkuadrat = errorrate * errorrate
        RMSElist.append(hasilkuadrat)
    RMSE = sum(RMSElist) / (len(predictions_point) - 2)
    RMSE = RMSE**(1 / 2)
    print(RMSE)

    getdataforecast = data._forecasting(5, 1)

    total_prediksi = 5
    takefrom = 5
    forecast_result = model.forecast(total_prediksi, getdataforecast, takefrom)
    # print(forecast_result[0])
    # forecast_result=np.append(forecast_result,[0.0])
    # print(forecast_result)

    n_steps = 8
    # split into samples
    X, y = split_sequence(forecast_result, n_steps)
    # reshape from [samples, timesteps] into [samples, timesteps, features]
    n_features = 1
    # print(X)
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    # define model
    model = Sequential()
    model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    # fit model
    model.fit(X, y, epochs=200, verbose=0)

    # demonstrate prediction
    for j in range(total_prediksi):
        getxlastnumber = array(forecast_result[(-n_steps - 1):-1])
        x_input = getxlastnumber
        # print(x_input)

        x_input = x_input.reshape((1, n_steps, n_features))
        yhat = model.predict(x_input, verbose=0)
        # print(yhat[0][0])

        forecast_result = np.append(forecast_result, yhat[0])
        # prediction_point=np.append(prediction_point,yhat[0])

    plot_results_onlypredicted(forecast_result)
Example #30
def plot_results_multiple(predicted_data, true_data, prediction_len):
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # Pad the list of predictions with None so each forecast segment is plotted
    # at its correct start offset (matplotlib skips None values)
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
        plt.legend()
    plt.show()


# %% Build/Train the model (assumes `configs` and `data` exist from an earlier cell)
model = Model()

model.build_model(configs)

# out-of-memory generative training
steps_per_epoch = math.ceil(
    (data.len_train - configs['data']['sequence_length']) /
    configs['training']['batch_size'])

model.train_generator(data_gen=data.generate_train_batch(
    seq_len=configs['data']['sequence_length'],
    batch_size=configs['training']['batch_size'],
    normalise=configs['data']['normalise']),
                      epochs=configs['training']['epochs'],
                      batch_size=configs['training']['batch_size'],
                      steps_per_epoch=steps_per_epoch,
                      save_dir=configs['model']['save_dir'])
Example #31
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    lossesMINE = []
    lossesKERAS = []
    # for day_prediction in [1, 2, 3, 4, 5, 10, 50]:
    day_prediction = 10
    print("Predicting %i days..." % day_prediction)

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'],
                               day_pred=day_prediction)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # out-of-memory generative training

    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise'],
    #         day_pred=day_prediction
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
        day_pred=day_prediction)

    # print(x_test.shape)
    # print(len(data.denormalization_vals))
    # print(y_test.shape)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    #
    # y_test_unormalized = np.zeros((y_test.shape[0], ))
    # prediction_unormalized = []
    #
    # for i in range(4):
    #     for j in range(int(configs['data']['sequence_length']) - 10):
    #         y_test_unormalized[j*(i+1)] = (y_test[j] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0]
    #         prediction_unormalized.append((predictions[j*(i+1)] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0])

    npPredictions = np.asarray(predictions)
    # print(type(npPredictions))
    # print(type(y_test))
    # print(npPredictions.shape)
    # print(y_test.shape)
    loss = 0
    for i in range(len(npPredictions)):
        loss += (npPredictions[i] - y_test[i])**2
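    # note: `loss` is a *sum* of squared errors, while Keras' mse is a mean,
    # so the two printed values differ by roughly a factor of len(npPredictions)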
    print(loss)
    keras_loss = model.model.evaluate(x_test, y_test)
    print(keras_loss)

    lossesMINE.append(loss)
    lossesKERAS.append(keras_loss)

    #plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    real_y = np.reshape(y_test, (y_test.shape[0], )) * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    real_pred = predictions * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    # print(real_y.shape)
    # print(real_pred.shape)
    data.denormalization_vals = []

    #plot_results(predictions, y_test)

    plot_results(real_pred, real_y)

    print(lossesMINE)
    print(lossesKERAS)