Code Example #1
File: run.py Project: BenfenYU/HelpPlay
def main(train_after=False):
    config_file = 'web_flask/LSTM/config.json'
    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(configs['data']['filename'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    model = Model()
    if not train_after:
        model.build_model(configs)
    else:
        model.load_model(os.path.join(configs['model']['save_dir'],
                                      configs['model']['model_name']))
    history = LossHistory()

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'],
                history=history,
                x_test=x_test,
                y_test=y_test)
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    '''

    history.loss_plot('epoch')
    #loss, accuracy = model.model.evaluate(x_test, y_test)
    #print(loss,accuracy)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x[0])  # predicts on the first training window (originally x_test)

    #plot_results_multiple(predictions, y, configs['data']['sequence_length'])
    plot_results(predictions, y)
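Almost every excerpt on this page is a variant of run.py from the LSTM-Neural-Network-for-Time-Series-Prediction project (the path in Example #25 names it explicitly), and all of them omit their imports. A typical preamble, sketched here as an assumption rather than any fork's verbatim header; the core.* module paths and the plot helper body are the upstream layout and may differ per fork:

import json
import math
import os

import matplotlib.pyplot as plt

# Assumed module layout from the upstream project; individual forks rename or move these.
from core.data_processor import DataLoader
from core.model import Model


def plot_results(predicted_data, true_data):
    # Minimal plotting helper consistent with the plot_results(...) calls in the examples.
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    ax.plot(predicted_data, label='Prediction')
    ax.legend()
    plt.show()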
Code Example #2
def main():
    configs = json.load(open("config.json", "r"))
    if not os.path.exists(configs["model"]["save_dir"]):
        os.makedirs(configs["model"]["save_dir"])

    data = DataLoader(
        os.path.join("data", configs["data"]["filename"]),
        configs["data"]["train_test_split"],
        configs["data"]["columns"],
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs["data"]["sequence_length"],
        normalise=configs["data"]["normalise"],
    )

    """
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	"""
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs["data"]["sequence_length"])
        / configs["training"]["batch_size"]
    )
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs["data"]["sequence_length"],
            batch_size=configs["training"]["batch_size"],
            normalise=configs["data"]["normalise"],
        ),
        epochs=configs["training"]["epochs"],
        batch_size=configs["training"]["batch_size"],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs["model"]["save_dir"],
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs["data"]["sequence_length"],
        normalise=configs["data"]["normalise"],
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs["data"]["sequence_length"])
    plot_results(predictions, y_test)
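Every variant above reads the same handful of config.json keys: data.filename, data.train_test_split, data.columns, data.sequence_length, data.normalise, training.epochs, training.batch_size, and model.save_dir (some also use model.loss, model.optimizer, or a model_name). A minimal sketch of such a file, written from Python; all values here are illustrative assumptions, not any project's real settings:

import json

# Illustrative values only; build_model() typically also expects a
# model['layers'] list, omitted here because its schema varies per fork.
configs = {
    'data': {
        'filename': 'sp500.csv',        # hypothetical data file
        'train_test_split': 0.85,
        'columns': ['Close', 'Volume'],
        'sequence_length': 50,
        'normalise': True,
    },
    'training': {'epochs': 2, 'batch_size': 32},
    'model': {'save_dir': 'saved_models', 'loss': 'mse', 'optimizer': 'adam'},
}

with open('config.json', 'w') as f:
    json.dump(configs, f, indent=4)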
Code Example #3
File: run.py Project: chenchen521/lstm_stock
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    # load the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # build the model
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    # number of forward/backward passes per epoch
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)


#if __name__=='__main__':
#	main()
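The excerpts switch between three prediction helpers: predict_point_by_point (one one-step prediction per test window), predict_sequence_full (a single long run that feeds every prediction back in), and predict_sequences_multiple (restart the feedback loop every prediction_len steps). A rough sketch of the first and last, assuming the Model class wraps a Keras model; this is a paraphrase of the upstream behaviour, not any fork's verbatim code:

import numpy as np

def predict_point_by_point(keras_model, data):
    # One prediction per window: each window of true values predicts the next point.
    predicted = keras_model.predict(data)
    return np.reshape(predicted, (predicted.size,))

def predict_sequences_multiple(keras_model, data, window_size, prediction_len):
    # Predict prediction_len steps from every prediction_len-th window, shifting
    # the window forward and inserting each new prediction at its end.
    prediction_seqs = []
    for i in range(int(len(data) / prediction_len)):
        curr_frame = data[i * prediction_len]
        predicted = []
        for _ in range(prediction_len):
            predicted.append(keras_model.predict(curr_frame[np.newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs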
Code Example #4
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    if not configs['training']['train']:
        model.load_model(filepath='saved_models/02102019-164727-e2.h5')
    else:
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
    # out-of-memory generative training
    # steps_per_epoch = math.ceil(
    #     (data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'],
    #                                                configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
Code Example #5
File: run.py Project: sergiovision/XTradeServer
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    rates_count = (configs['data']['sequence_length'] *
                   configs['data']['number_sequences'])
    #os.path.join('data', configs['data']['filename']),
    data = DataLoader(configs['data']['symbol'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'], rates_count)

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)

    input("Press Enter to continue...")
Code Example #6
File: main.py Project: heroichu/traffic-predict
def main():
    configs = json.load(open('config.json', 'r'))

    # create a folder to save model parameters
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    #plot true data
    #plot_results(data.data_train,True)

    #train model
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )


    predictions = model.predict_point_by_point(x_test)
#    plot_results(predictions, y_test)
#    print (predictions)
#    plot_results(predictions, y_test)



    data1 = pd.DataFrame(predictions)    
    data1.to_csv('predict.csv')
    data2 = pd.DataFrame(y_test)
    data2.to_csv('true.csv')
Code Example #7
def main(model_name=None):
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    if model_name:
        saved_model = os.path.join(configs['model']['save_dir'], model_name)
        model.load_model(saved_model)
    else:
        model.build_model(configs)

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
    '''

    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    if not model_name:
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Code Example #8
def main_sin():
    config = json.load(open("config_sin.json", 'r'))
    data = DataLoader(os.path.join('data', config['data']['filename']),
                      config['data']['train_test_split'],
                      config['data']['columns'])
    x_train, y_train = data.get_train_data(config['data']['sequence_length'],
                                           config['data']['normalise'])
    x_test, y_test = data.get_test_data()
    model = Model()
    model.build_model(config)
    model.train(x_train, y_train, config['training']['epochs'],
                config['training']['batch_size'])
Code Example #9
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    # To load an already-saved model (skipping training, i.e. the model.train() call below):
    # model.load_model(r'saved_models/15102019-155115-e2.h5')

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    # print('shape of x: {0}'.format(x.shape))  # (3942, 49, 2)
    # print('shape of y: {0}'.format(y.shape))  # (3942, 1)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )
    '''

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Code Example #10
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
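The steps_per_epoch expression repeated throughout comes from sliding a window of sequence_length over the training split: that yields len_train - sequence_length windows (these excerpts drop the customary +1), and each generator step emits batch_size of them. A worked check with assumed sizes:

import math

len_train = 4000          # assumed number of training rows
sequence_length = 50
batch_size = 32

num_windows = len_train - sequence_length             # sliding windows available
steps_per_epoch = math.ceil(num_windows / batch_size)
print(num_windows, steps_per_epoch)                   # 3950 124  (3950 / 32 = 123.4375, rounded up)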
Code Example #11
def main():
    configs = json.load(open('config.json', 'r'))

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    # math.ceil(total number of windows / batch_size)
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Code Example #12
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training

    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'],
                          configs=configs)

    x_test, y_test, p0 = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    y_test = np.reshape(np.copy(y_test), -1)

    plot_results((p0 * (predictions + 1))[-200:], (p0 * (y_test + 1))[-200:])
    measure_performance(predictions, y_test)
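Example #12 (and the output line in Example #13) exposes the window normalisation these loaders appear to apply: each window is divided by its first value p0 and shifted, n_i = p_i / p0 - 1, so predictions are mapped back to price units with p0 * (n + 1). A self-contained sketch of both directions under that assumption:

import numpy as np

def normalise_window(window):
    # Scale a window relative to its first value: n_i = p_i / p_0 - 1.
    p0 = window[0]
    return window / p0 - 1.0, p0

def denormalise(p0, normalised):
    # Invert the window normalisation: p_i = p_0 * (n_i + 1).
    return p0 * (np.asarray(normalised) + 1.0)

window = np.array([100.0, 102.0, 99.0, 103.5])
norm, p0 = normalise_window(window)
assert np.allclose(denormalise(p0, norm), window)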
Code Example #13
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']): os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    x_test, y_test, onedot = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(onedot)
    with open('output.txt', 'w') as f:
        f.write('Predicted bolt-and-nut consumption for the next time step: ' + str(int((predictions[-1] + 1) * data.last_raw_data(seq_len=configs['data']['sequence_length']))))
Code Example #14
def main(choice):
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    model = Model()
    model.build_model(configs)
    if (choice != 'info'):
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'])

        # out-of-memory generative training
        # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        # model.train_generator(
        #     data_gen = data.generate_train_batch(
        #         seq_len = configs['data']['sequence_length'],
        #         batch_size = configs['training']['batch_size'],
        #         normalise = configs['data']['normalise']
        #     ),
        #     epochs = configs['training']['epochs'],
        #     batch_size = configs['training']['batch_size'],
        #     steps_per_epoch = steps_per_epoch
        # )

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])

        if (choice == "multi"):
            predictions = model.predict_sequences_multiple(
                x_test, configs['data']['sequence_length'],
                configs['data']['sequence_length'])
            plot_results_multiple(predictions, y_test,
                                  configs['data']['sequence_length'])
        elif (choice == "seq"):
            predictions = model.predict_sequence_full(
                x_test, configs['data']['sequence_length'])
            plot_results(predictions, y_test)
        else:
            predictions = model.predict_point_by_point(x_test)
            plot_results(predictions, y_test)
Code Example #15
def main():
    configs = json.load(open(CONFIG, 'r'))

    data = DataLoader(DATA, configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          model_path=MODEL)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
    #plot_results(predictions, y_test)
    sys.stdout.write("--END--")
Code Example #16
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    my_model = model.build_model(configs)

    plot_model(my_model, to_file=os.path.join('output', 'model.png'), show_shapes=True)
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x, y = data.get_train_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    print(x.shape)
    print(y.shape)

    print(configs['training']['batch_size'])
    print(configs['model']['save_dir'])
    model.train(x,
                y,
                configs['training']['epochs'],
                configs['training']['batch_size'],
                configs['model']['save_dir']
                )

    x_test, y_test = data.get_test_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    prediction_point = model.predict_point_by_point(x_test)

    # print(prediction_point)
    # print(np.array(predictions).shape)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(prediction_point, y_test)
Code Example #17
File: run.py Project: ethanwhois/Keras_predict_power
def main():
    # load parameters
    configs = json.load(open('./data/config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    # create the RNN model
    model = Model()
    model.build_model(configs)

    # load training data
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    print(x.shape)
    print(y.shape)

    #training model
    model.train(
        x,
        y,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        save_dir=configs['model']['save_dir']
    )

    #test results
    x_test, y_test = data.get_test_data(
        seq_len= configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
    )

    # results visualization
    predictions_multiseq = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions_pointbypoint = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions_multiseq, y_test, configs['data']['sequence_length'])
    plot_results(predictions_pointbypoint, y_test)
Code Example #18
File: run.py Project: mikeszabi/ThermoNN
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
    )

    model = Model()
    model.build_model(configs)

    # get train data
    x, y = data.get_train_data()

    #x=x.squeeze()
    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    #    # out-of-memory generative training
    #    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    #    model.train_generator(
    #        data_gen=data.generate_train_batch(
    #            batch_size=configs['training']['batch_size'],
    #        ),
    #        epochs=configs['training']['epochs'],
    #        batch_size=configs['training']['batch_size'],
    #        steps_per_epoch=steps_per_epoch,
    #        save_dir=configs['model']['save_dir']
    #    )

    # testing model
    x_test, y_test = data.get_test_data()
    #x_test=x_test.squeeze()

    predictions = model.predict_point_by_point(x_test)

    #   plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Code Example #19
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training (memory-heavy: all windows loaded at once)
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		save_dir = configs['model']['save_dir']
	)
	'''
    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    #predictions are made point by point with model.predict_point_by_point
    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
Code Example #20
def main():
    # read the required parameters
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    # load the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # create the RNN model
    model = Model()
    mymodel = model.build_model(configs)

    plot_model(mymodel, to_file='model.png', show_shapes=True)

    # load training data
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # visualise the test results
    predictions = model.predict_sequences_multiple(
        x_test,
        configs['data']['sequence_length'],
        configs['data']['sequence_length'],
        debug=False)
    print(np.array(predictions).shape)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Code Example #21
File: train.py Project: copyrightly/stock-prediction
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    dataframe = pd.read_csv(configs['data']['filename'])
    f = open('/Users/yucheng/Downloads/project2/stockIDs.txt', 'r')
    stockIDs = [int(line.split('\n')[0]) for line in f.readlines()]
    for id in stockIDs[377:378]:
        # for id in stockIDs[444:500]:
        print("index: ", stockIDs.index(id))
        data = DataLoader(dataframe, id, configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])
        '''
    	# in-memory training
    	model.train(
    		x,
    		y,
    		epochs = configs['training']['epochs'],
    		batch_size = configs['training']['batch_size'],
    		save_dir = configs['model']['save_dir']
    	)
    	'''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(id=id,
                              data_gen=data.generate_train_batch(
                                  seq_len=configs['data']['sequence_length'],
                                  batch_size=configs['training']['batch_size'],
                                  normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])
Code Example #22
def main():
    configs = json.load(open('configcrops.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # Yogyakarta: Kulon progo, bantul, gunung kidul, sleman, DIY
    # Jawa Barat: Bandung, Tasikmalaya, Majalengka, Cirebon, Kuningan, Garut, Sumedang, Cianjut, Subang, Purwakarta, Indramayu
    # Ciamis, Sukabumi, Bogor, Bekasi, Karawang

    # # out-of-memory generative training
    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    # # save_dir = configs['model']['save_dir']

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # print(x_test)
    # print(y_test)

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])

    predictions_point = model.predict_point_by_point(x_test)
    print(len(predictions_point))
    plot_results(predictions_point, y_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # predictions_full = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # plot_results(predictions_full, y_test)

    groundtrue = data._groundtruths(1)
    groundtrue = (groundtrue.ravel())
    print(len(groundtrue))

    RMSElist = []
    for i in range(len(groundtrue)):
        errorrate = groundtrue[i] - predictions_point[i]  # per-point error
        hasilkuadrat = errorrate * errorrate              # 'hasilkuadrat' = squared result
        RMSElist.append(hasilkuadrat)
    RMSE = sum(RMSElist) / (len(predictions_point) - 2)   # note: divides by n - 2, not n
    RMSE = RMSE**(1 / 2)
    print(RMSE)

    getdataforecast = data._forecasting(5, 1)

    total_prediksi = 5
    takefrom = 5
    forecast_result = model.forecast(total_prediksi, getdataforecast, takefrom)
    # print(forecast_result[0])
    # forecast_result=np.append(forecast_result,[0.0])
    # print(forecast_result)

    n_steps = 8
    # split into samples
    X, y = split_sequence(forecast_result, n_steps)
    # reshape from [samples, timesteps] into [samples, timesteps, features]
    n_features = 1
    # print(X)
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    # define model
    model = Sequential()
    model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    # fit model
    model.fit(X, y, epochs=200, verbose=0)

    # demonstrate prediction
    for j in range(total_prediksi):
        getxlastnumber = array(forecast_result[(-n_steps - 1):-1])
        x_input = getxlastnumber
        # print(x_input)

        x_input = x_input.reshape((1, n_steps, n_features))
        yhat = model.predict(x_input, verbose=0)
        # print(yhat[0][0])

        forecast_result = np.append(forecast_result, yhat[0])
        # prediction_point=np.append(prediction_point,yhat[0])

    plot_results_onlypredicted(forecast_result)
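The error loop in this example accumulates squared errors by hand (hasilkuadrat is Indonesian for "squared result") and divides by len(predictions_point) - 2 rather than the usual n. For comparison, the conventional RMSE in a few lines of NumPy:

import numpy as np

def rmse(y_true, y_pred):
    # Root mean squared error over aligned prediction/ground-truth arrays.
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.sqrt(np.mean((y_true - y_pred) ** 2)))

print(rmse([1.0, 2.0, 3.0], [1.1, 1.9, 3.2]))  # ~0.1414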
Code Example #23
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    # -- Data preparation: --
    data = DataLoader(os.path.join('../data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test, p0_vec = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # -- Init and fit CNN model: --
    n_features = x.shape[2]
    n_steps = configs['data']['sequence_length'] - 1

    # Define model
    model = Sequential()
    model.add(
        Conv1D(filters=128,
               kernel_size=2,
               activation='linear',
               input_shape=(n_steps, n_features)))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(50, activation='linear'))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    # Compile model
    model.compile(optimizer=configs['model']['optimizer'],
                  loss=configs['model']['loss'])

    # Fit model
    timer = Timer()
    timer.start()
    print('[Model] Training Started')
    model.fit(x,
              y,
              epochs=configs['training']['epochs'],
              batch_size=configs['training']['batch_size'])
    timer.stop()
    print('[Model] Predicting one step ahead...')

    # Get predictions
    yhat = model.predict(x_test, verbose=0)

    # Denormalize & plot
    p_pred, p_true = denorm_transform(p0_vec, yhat, y_test)
    plot_results(p_pred, p_true)  # de-normalised, i.e. original units

    # Compute evaluation metrics
    assess = EvalMetrics(p_true, p_pred)
    MAE = assess.get_MAE()
    RMSE = assess.get_RMSE()
    print("MAE on validation set is: %f" % MAE)
    print("RMSE on validation set is: %f" % RMSE)

    # Save model
    save_dir = configs['model']['save_dir']
    save_fname = os.path.join(
        save_dir, '%s_cnn.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S')))
    model.save(save_fname)
Code Example #24
            print(f'current xp,yp {self.xp, self.yp}')

    def get_optimal_params(self):
        return self.xp[np.argmax(self.yp)]


def main():
    pass


if __name__ == '__main__':
    main()
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    bolstm = BayesOptimizerLSTM(configs, model, data, init_params=[[1], [.1]])
    bolstm.fit()

    plt.plot(bolstm.xp, bolstm.yp)
    plt.show()
Code Example #25
def main():
    configs = json.load(open('config.json', 'r'))

    #==================== selection =====================#
    if configs['mode']['selection'] == True:
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        #IDs = configs['data']['IDs']
        with open(
                r'D:\ColumbiaCourses\Advanced Big Data Analytics 6895\milestone3\LSTM-Neural-Network-for-Time-Series-Prediction\data\ID.csv',
                newline='') as f:
            reader = csv.reader(f)
            IDs = list(reader)
        IDs = [x[0] for x in IDs]

        model = Model()
        if configs['mode']['train_new_model'] == True:
            model.build_model(configs)
            print('[Model] Training Started')
            cnt = 0
            #===== train ====#
            for ID in IDs:
                cnt += 1
                filename = str(ID) + '.csv'
                data = DataLoader(filename=os.path.join('data', filename),
                                  split=configs['data']['train_test_split'],
                                  cols=configs['data']['columns'],
                                  test_only=False)
                x, y = data.get_train_data(
                    seq_len=configs['data']['sequence_length'],
                    normalise=configs['data']['normalise'])

                if cnt % 1 == 0:  # always true as written; raise the modulus to checkpoint less often
                    tocheckpoint = True
                else:
                    tocheckpoint = False
                steps_per_epoch = math.ceil(
                    (data.len_train - configs['data']['sequence_length']) /
                    configs['training']['batch_size'])
                model.train_generator_all(
                    data_gen=data.generate_train_batch(
                        seq_len=configs['data']['sequence_length'],
                        batch_size=configs['training']['batch_size'],
                        normalise=configs['data']['normalise']),
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    steps_per_epoch=steps_per_epoch,
                    save_dir=configs['model']['save_dir'],
                    tocheckpoint=tocheckpoint,
                    ID=ID)
            print('[Model] Training All Finished')
        else:
            model.load_model(configs['mode']['train_file_path'])

        #===== predict =====#
        print('[Prediction] Starting prediction and ranking')
        ranklist = []
        for ID in IDs:
            print('predicting {}'.format(ID))
            filename = str(ID) + '.csv'
            data = DataLoader(filename=os.path.join('data', filename),
                              split=configs['data']['train_test_split'],
                              cols=configs['data']['columns'],
                              test_only=False)
            x_test, y_test = data.get_test_data(
                seq_len=configs['data']['sequence_length'],
                normalise=configs['data']['normalise'])
            predictions = model.predict_point_by_point(x_test)
            test_score = score(y_true=y_test, y_pred=predictions)
            ranklist.append((ID, *test_score))
        ranklist.sort(key=lambda x: x[1])
        with open("ranklist.csv", "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerows(ranklist)
        return
    #====================================================#

    #==================== single task ===================#
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns'],
        configs['mode']['test_only']  # test_only flag passed through to DataLoader
    )

    model = Model()
    if configs['mode']['test_only'] == True:
        model.load_model(configs['mode']['test_file_path'])
    else:
        if configs['mode']['train_new_model'] == True:
            model.build_model(configs)
        else:
            model.load_model(configs['mode']['train_file_path'])

        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])
        '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
        '''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'],
                              mode=configs['mode'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['prediction_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    test_score = score(y_true=y_test, y_pred=predictions)

    # plot_results_multiple(predictions, y_test, configs['data']['prediction_length'])
    plot_results(predictions, y_test)
Code Example #26
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    lossesMINE = []
    lossesKERAS = []
    # for day_prediction in [1, 2, 3, 4, 5, 10, 50]:
    day_prediction = 10
    print("Predicting %i days..." % day_prediction)

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'],
                               day_pred=day_prediction)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # out-of-memory generative training

    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise'],
    #         day_pred=day_prediction
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
        day_pred=day_prediction)

    # print(x_test.shape)
    # print(len(data.denormalization_vals))
    # print(y_test.shape)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    #
    # y_test_unormalized = np.zeros((y_test.shape[0], ))
    # prediction_unormalized = []
    #
    # for i in range(4):
    #     for j in range(int(configs['data']['sequence_length']) - 10):
    #         y_test_unormalized[j*(i+1)] = (y_test[j] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0]
    #         prediction_unormalized.append((predictions[j*(i+1)] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0])

    npPredictions = np.asarray(predictions)
    # print(type(npPredictions))
    # print(type(y_test))
    # print(npPredictions.shape)
    # print(y_test.shape)
    loss = 0
    for i in range(len(npPredictions)):
        loss += (npPredictions[i] - y_test[i])**2
    print(loss)
    keras_loss = model.model.evaluate(x_test, y_test)
    print(keras_loss)

    lossesMINE.append(loss)
    lossesKERAS.append(keras_loss)

    #plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    real_y = np.reshape(y_test, (y_test.shape[0], )) * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    real_pred = predictions * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    # print(real_y.shape)
    # print(real_pred.shape)
    data.denormalization_vals = []

    #plot_results(predictions, y_test)

    plot_results(real_pred, real_y)

    print(lossesMINE)
    print(lossesKERAS)
Code Example #27
File: run.py Project: vigneshkumar28/LSTM_IoT
def main():

    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    model.build_model(configs)

    # get live sensor data from the Arduino and predict the next 10 sensor values
    sensor_port = serial.Serial('COM7', 9600)
    sensor_port.close()
    sensor_port.open()
    seq_len = (configs['data']['sequence_length'],)  # one-element tuple (note the comma); indexed as seq_len[0] below
    sensor_data = []
    predictions_data = []
    live_data = np.arange(seq_len[0] - 1)

    plt.ion()  # real-time plotting

    while True:
        i = 0
        while i < seq_len[0] - 1:  # store incoming data to testing data array
            b = sensor_port.readline()  # read a byte string
            live_data[i] = float(b.decode())
            sensor_data.append(live_data[i])
            i += 1
        sensor_struct_data = live_data[
            np.newaxis, :, np.newaxis]  # construct live data for the LSTM
        predictions = model.predict_sequence_live(
            sensor_struct_data, configs['data']['sequence_length']
        )  #Shift the window by 1 new prediction each time, re-run predictions on new window
        predictions_data.append(predictions)

        plot_results(predictions_data[-120:], sensor_data[-100:])
        plt.show()
        plt.pause(0.1)  # critical to keep the figure refreshing continuously

        # optionally predict only once more than one window has accumulated
        #if len(sensor_data) > 1 * seq_len:

        # retrain once roughly ten windows of new readings have accumulated
        if len(sensor_data) > 10 * seq_len:
            np.savetxt(os.path.join('data', 'sensor.csv'),
                       sensor_data,
                       delimiter=',',
                       header='sensor_value')

            #load data for training
            data = DataLoader(
                os.path.join('data', configs['data']['filename']),
                configs['data']['train_test_split'],
                configs['data']['columns'])

            x, y = data.get_train_data(
                seq_len=configs['data']['sequence_length'],
                normalise=configs['data']['normalise'])
            # in-memory training
            model.train(x,
                        y,
                        epochs=configs['training']['epochs'],
                        batch_size=configs['training']['batch_size'],
                        save_dir=configs['model']['save_dir'])
            sensor_data = sensor_data[-100:]
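The [np.newaxis, :, np.newaxis] indexing above is what turns a flat window of sensor readings into the 3-D (batch, timesteps, features) tensor a Keras LSTM layer expects. A minimal shape check, where seq_len = 50 is an assumed value rather than anything read from this project's config.json:

import numpy as np

seq_len = 50                          # assumed window size
window = np.zeros(seq_len - 1)        # one flat window of readings
lstm_input = window[np.newaxis, :, np.newaxis]
print(lstm_input.shape)               # (1, 49, 1): batch, timesteps, features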
Code example #28
File: run.py Project: zebointexas/All_Projects
def main():

    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    # model.build_model(configs)
    model.load_model("saved_models/dow_30_50%.h5")

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # out-of-memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])

    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    print("x_test.shape")
    print(x_test.shape)

    predictions = model.predict_point_by_point(x_test)

    ########################################################################
    from sklearn.metrics import mean_squared_error
    # loss_final = mean_squared_error(predictions, y_test)
    # print("Testing Loss = " + str(loss_final))
    ########################################################################

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    print(predictions.shape)
    print(y_test.shape)

    m = pd.DataFrame(predictions)
    n = pd.DataFrame(y_test)

    m.to_csv("predictions.csv")
    n.to_csv("y_test.csv")

    p = 0
    t = 0

    t_1 = 0

    count = 0

    for a in range(len(predictions)):

        if (a == 0):
            t_1 = y_test[a]
            continue

        # A prediction counts as "good" when it moves in the same direction
        # as the true value relative to the previous true value, i.e. when
        # (t - t_1) and (p - t_1) share a sign.
        p = predictions[a]
        t = y_test[a]

        match = (t - t_1) * (p - t_1)

        if (match > 0):
            count += 1

        t_1 = t

    print("Good prediction rate = " + str(count / len(predictions)))

    plot_results(predictions, y_test)
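The direction-matching loop above vectorises cleanly; a minimal sketch, where directional_accuracy is a hypothetical helper name rather than anything defined in this repository:

import numpy as np

def directional_accuracy(predictions, y_test):
    # Fraction of steps where the prediction moves the same way as the
    # truth relative to the previous true value (the loop above, vectorised).
    p = np.asarray(predictions).ravel()
    t = np.asarray(y_test).ravel()
    prev = t[:-1]
    match = (t[1:] - prev) * (p[1:] - prev)
    return np.count_nonzero(match > 0) / len(p)

print(directional_accuracy([0.1, 0.3, 0.2], [0.1, 0.25, 0.15]))  # 2/3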
Code example #29
def main():
    # instantiation; configs is assumed to be loaded at module scope, since
    # this variant never reads config.json itself
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
	# in-memory training
	model.train(
		x,
		y,
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size']
	)
	'''
    # out-of-memory generative training
    # Unclear why this has to be computed by hand; doesn't the LSTM already
    # take a batch parameter? Why work out the total number of batches to feed?

    # Otherwise the following can occur:
    # in data_generator_task, generator_output = next(self._generator), StopIteration
    # in fit_generator, str(generator_output))
    # output of generator should be a tuple `(x, y, sample_weight)` or `(x, y)`. Found: None
    # When that error appears, manually reduce steps_per_epoch
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size']) - 7  # manual slack; see the note above
    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])

    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # predictions = model.predict_point_by_point(x_test)

    y_true_train, y_true_test = data.get_split_data()
    unnorm_data = predict_unnorm_data(
        y_true_test,
        prediction_len=configs['data']['sequence_length'],
        predictions=predictions)
    # Compute the RMSE and emit a DataFrame
    begin_date = datetime(year=2018, month=9, day=18)
    end_date = begin_date + timedelta(
        days=(configs['data']['sequence_length'] - 1))
    # y_true_test:(301,2)
    y_true_test = pd.DataFrame(y_true_test)
    file = pd.read_csv(os.path.join('data', configs['data']['filename']))
    file = file['time'][len(y_true_train):]
    file = pd.Series(file)
    # Assigning to a new column initially raised an error: DataFrame and Series
    # each carry their own index, so use .values to extract the raw values
    # before assigning them
    y_true_test['time'] = file.values
    y_true_test = y_true_test.set_index('time')
    y_true_test.index = pd.to_datetime(y_true_test.index)
    calc_RMSE(predicted_data=unnorm_data,
              y_test_true=y_true_test,
              begin_date=begin_date,
              end_date=end_date)
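The steps_per_epoch question raised in the comments above has a concrete answer: a Python generator has no length, so fit_generator must be told how many batches make up one epoch, and if that count exceeds what the generator actually yields, next() raises StopIteration and Keras surfaces the "Found: None" error quoted above. A worked sketch with assumed numbers:

import math

len_train = 4000        # assumed number of training rows
sequence_length = 50    # assumed window size
batch_size = 32

n_windows = len_train - sequence_length       # usable training windows
steps_per_epoch = math.ceil(n_windows / batch_size)
print(n_windows, steps_per_epoch)             # 3950 124

# If the generator yields fewer than steps_per_epoch batches, Keras exhausts
# it mid-epoch; hence the manual "- 7" reduction in the example above.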
Code example #30
def main():
    config_path = 'config/config.json'
    with open(config_path, 'r') as f:
        configs = json.load(f)
        logging.info("Loaded {}".format(config_path))

    logging.info("\n{}\n".format(configs))

    data_path = configs['data']['filename']
    data_dir = os.path.dirname(data_path)
    dtypes = configs['data'].get('dtypes', None)
    windowed_normalization = configs['data']['normalise']

    data = DataLoader(data_path,
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      scaler_path=os.path.join(data_dir, "scaler"),
                      windowed_normalization=windowed_normalization,
                      dtypes=dtypes)

    model = Model()

    if configs['model'].get('load_model'):
        model_path = os.path.join(configs['model']['load_model'])
        logging.info("Loading {}".format(model_path))
        model.load_model(model_path, configs)
        plot_dir = os.path.join(os.path.dirname(model_path), "plots")
        os.makedirs(plot_dir, exist_ok=True)
    else:
        plot_dir = os.path.join(configs['model']['save_dir'], "plots")
        os.makedirs(plot_dir, exist_ok=True)
        model.build_model(configs)
        x, y = data.get_train_data(
            seq_len=configs['data']['sequence_length'],
            windowed_normalization=windowed_normalization)
        '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
        '''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            windowed_normalization=windowed_normalization),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        windowed_normalization=windowed_normalization)

    predictions_multiple = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions_multiple,
                          y_test,
                          configs['data']['sequence_length'],
                          out_path=os.path.join(plot_dir, "multiple.png"))
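The example above threads a windowed_normalization flag through the loader and model. In this family of scripts, windowed normalisation expresses each window relative to its first value, which is exactly what the (normalised + 1) * base denormalisation earlier in this listing inverts. A minimal sketch of the pair, under that assumption:

import numpy as np

def normalise_window(window):
    # n_i = (p_i / p_0) - 1: change relative to the window's first value
    window = np.asarray(window, dtype=float)
    return window / window[0] - 1

def denormalise_window(normalised, base):
    # inverse: p_i = (n_i + 1) * p_0
    return (np.asarray(normalised) + 1) * base

prices = [100.0, 102.0, 99.0, 105.0]
norm = normalise_window(prices)
assert np.allclose(denormalise_window(norm, prices[0]), prices)
print(norm)   # [ 0.    0.02 -0.01  0.05]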
Code example #31
File: run.py Project: ChrisRusu1/Bitcoin_Prediction
def main():
    do = 4  # selects the branch below; 4 falls through to the final else
    if do == 0:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
        '''
        # out-of-memory generative training
        steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        model.train_generator(
            data_gen=data.generate_train_batch(
                seq_len=configs['data']['sequence_length'],
                batch_size=configs['training']['batch_size'],
                normalise=configs['data']['normalise']
            ),
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            steps_per_epoch=steps_per_epoch,
            save_dir=configs['model']['save_dir']
        )
        '''

        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
        # predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        # plot_results(predictions, y_test)
    elif do == -1:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training

        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
    elif do == 1:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\10092020-152418-e31.h5"
        )
        #configs['data']['sequence_length'] = 12
        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

        #predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        #plot_results(predictions, y_test)
    elif do == 2:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\23092020-232350-e31.h5"
        )
        normed_test_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        normed_test_data = normed_test_data.get(configs['data']['columns'])
        normed_test_data = normed_test_data[
            -configs['data']['sequence_length'] + 1:]
        norm_train_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        norm_train_data = norm_train_data.get(configs['data']['columns'])
        norm_train_data = norm_train_data[-configs['data']['sequence_length'] +
                                          1:]
        normed_test_data = DataLoader.normalise_windows2(
            model, window_data=normed_test_data, single_window=True)
        norm_train_data = DataLoader.normalise_windows2(
            model, window_data=norm_train_data, single_window=True)
        print(normed_test_data)
        # 'data' is never constructed in this branch; build it as the other
        # branches do before reading its currentData attribute
        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        test = data.currentData
        model.predict_sequences_multipleSecondAttempt(test, 30)
    else:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']["recentName"]),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models_Test\27122020-171426-e101testv3.h5"
        )
        testdata, printData = data.normData(seq_len=30, normalise=True)

        predictions = model.predict_sequences_multipleSecondAttempt(
            testdata, 29)
        plot_results_multiple(predictions, printData, 29)
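Several of these variants call predict_sequences_multiple. In the widely forked upstream implementation these scripts appear to derive from, it starts a fresh window every prediction_len steps and rolls the window forward by feeding each prediction back in. A sketch of that loop, offered as an approximation since each fork may differ:

import numpy as np

def predict_sequences_multiple(model, data, window_size, prediction_len):
    # For every prediction_len-th test window, predict prediction_len steps
    # ahead by appending each prediction and dropping the oldest timestep.
    prediction_seqs = []
    for i in range(len(data) // prediction_len):
        curr_frame = data[i * prediction_len]
        predicted = []
        for _ in range(prediction_len):
            predicted.append(model.predict(curr_frame[np.newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2],
                                   predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs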