Example #1
def predict_prepare(data_x=None, model_path=None, config_file='web_flask/LSTM/config.json'):

    configs = json.load(open(config_file, 'r'))

    '''
    data_loader = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns'],
         normalise_meth=configs['data']['normalise']
    )

    # Predict using all of the data
    data_x, data_y = data_loader.get_all_data(configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    data_x = data_x[-1]
    '''
    model = Model()
    # use the caller-supplied path, falling back to a bundled checkpoint
    model_way = model_path or './web_flask/LSTM/saved_models/20052019-174244-e60.h5'
    model.load_model(model_way)

    predictions = model.predict_point_by_point(data_x)

    # print(predictions)

    ''' Predicts one value every five minutes; this seems to be wrong
Example #2
def predict():
    configs = json.load(open(CONFIG, 'r'))

    data = DataLoader(DATA, configs['data']['train_test_split'],
                      configs['data']['columns'])

    global model
    if model is None:
        model = Model()
        model.load_model(MODEL)

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    if TYPE == "sequence":
        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
    if TYPE == "point" or TYPE == "predict":
        predictions = model.predict_point_by_point(x_test)
    if TYPE == "full":
        predictions = model.predict_sequence_full(
            x_test, configs['data']['sequence_length'])
    if TYPE == "full" or TYPE == "point":
        plot_results(predictions, y_test)
    if TYPE == "predict":
        predicted_value = data.denormalize_windows(
            predictions[-1], configs['data']['sequence_length'])
        sys.stdout.write("--END--{}--END--\n".format(predicted_value))
    else:
        sys.stdout.write("--END--")
Example #3
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    if not configs['training']['train']:
        model.load_model(filepath='saved_models/02102019-164727-e2.h5')
    else:
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
    # out-of memory generative training
    # steps_per_epoch = math.ceil(
    #     (data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'],
    #                                                configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
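
Every example here reads the same handful of config keys. A minimal, hypothetical configuration consistent with the fields accessed above, written out as the equivalent Python dict (all values are illustrative placeholders):

configs = {
    "data": {
        "filename": "sp500.csv",        # loaded from data/<filename>
        "train_test_split": 0.85,       # fraction of rows used for training
        "columns": ["Close", "Volume"],
        "sequence_length": 50,
        "normalise": True,
    },
    "training": {"epochs": 2, "batch_size": 32},
    "model": {"save_dir": "saved_models"},
}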
Example #4
def main(model_name=None):
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    if model_name:
        saved_model = os.path.join(configs['model']['save_dir'], model_name)
        model.load_model(saved_model)
    else:
        model.build_model(configs)

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
    '''

    # out-of memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])
    if not model_name:
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
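
The steps_per_epoch arithmetic in Example #4 counts how many batches one pass over the training windows takes: there are len_train - sequence_length windows, and each step consumes one batch. Worked through with illustrative numbers:

import math

len_train = 4000            # rows in the training split (illustrative)
sequence_length = 50
batch_size = 32

steps_per_epoch = math.ceil((len_train - sequence_length) / batch_size)
print(steps_per_epoch)      # ceil(3950 / 32) = 124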
Example #5
def predict(test):
    # initialize dataLoader with split of 0

    cleaner.main_func()

    data = DataLoader(test, 0, configs['data']['columns'])
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'], normalise=False)
    model = Model()
    model.load_model('saved_models/tracker.h5')
    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
    return "OK"
Example #6
def main(train_after=False):
    config_file = 'web_flask/LSTM/config.json'
    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(configs['data']['filename'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    model = Model()
    if train_after:
        model.load_model(os.path.join(configs['model']['save_dir'],
                                      configs['model']['model_name']))
    else:
        model.build_model(configs)
    history = LossHistory()

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'],
                history=history,
                x_test=x_test,
                y_test=y_test)
    '''
    # out-of memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    '''

    history.loss_plot('epoch')
    #loss, accuracy = model.model.evaluate(x_test, y_test)
    #print(loss,accuracy)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x[0])  # originally x_test

    #plot_results_multiple(predictions, y, configs['data']['sequence_length'])
    plot_results(predictions, y)
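
LossHistory in Example #6 is not a stock Keras class. A minimal sketch of what such a callback could look like, assuming it only records per-epoch losses and plots them via loss_plot('epoch') (the project's actual implementation may differ):

import tensorflow as tf
import matplotlib.pyplot as plt

class LossHistory(tf.keras.callbacks.Callback):
    # record training/validation loss after every epoch
    def on_train_begin(self, logs=None):
        self.losses = {'epoch': []}
        self.val_losses = {'epoch': []}

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.losses['epoch'].append(logs.get('loss'))
        self.val_losses['epoch'].append(logs.get('val_loss'))

    def loss_plot(self, loss_type='epoch'):
        plt.plot(self.losses[loss_type], label='train loss')
        if any(v is not None for v in self.val_losses[loss_type]):
            plt.plot(self.val_losses[loss_type], label='val loss')
        plt.xlabel(loss_type)
        plt.ylabel('loss')
        plt.legend()
        plt.show()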
Example #7
def main():
    configs = json.load(open('config.json', 'r'))
    model = Model()
    model.load_model("./saved_models/model2.h5")

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #8
def main_plot():

    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    x, y = data.get_test_data(seq_len=configs['data']['sequence_length'],
                              normalise=configs['data']['normalise'])
    model = Model()
    global newest_model
    if newest_model:
        model_way = newest_model
    else:
        model_way = '/home/bf/Documents/Projects/helpplay/HelpPlay/train/LSTM-Neural-Network-for-Time-Series-Prediction/saved_models/10062019-163648-e40.h5'
    model.load_model(model_way)
    print(model.model.evaluate(x, y))
    pre_y = model.predict_point_by_point(x)
    print(x)
    plot_results(pre_y, y)
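
Example #8 falls back to a hard-coded checkpoint path when the newest_model global is unset. A hypothetical helper that would pick the most recently saved checkpoint instead (not part of the original code):

import glob
import os

def find_newest_model(save_dir='saved_models'):
    # return the most recently modified .h5 checkpoint, or None if none exist
    candidates = glob.glob(os.path.join(save_dir, '*.h5'))
    return max(candidates, key=os.path.getmtime) if candidates else None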
Example #9
model_id = configs['model']['model_id']
save_dir = configs['model']['save_dir']

dataloader = DataLoader()
x_scaler_filename = save_dir + "/" + model_id + "-x.scaler"
y_scaler_filename = save_dir + "/" + model_id + "-y.scaler"
dataloader.restore_scalers(x_scaler_filename, y_scaler_filename)

filename = os.path.join('data', configs['data']['filename'])
dataframe = pandas.read_csv(filename, sep=',', encoding='utf-8')
dataframe.index.name = 'fecha'
x_data = dataframe.get(configs['data']['x_cols']).values

in_seq_len = configs['data']['input_sequence_length']
x_data = x_data[:, :]  # no-op slice; all rows are used for prediction
input_data = dataloader.prepare_input_data(x_data, in_seq_len)
print("Input vector shape: " + str(x_data.shape))

model_filename = sys.argv[2]
model = Model(configs['data']['output_mode'])
model.load_model(filepath=model_filename)

print("Plotting predictions point by point on validation set")
predictions = model.predict_point_by_point(input_data)
print(predictions.shape)
unscaled_predictions = dataloader.recompose_results(predictions[:, 0, :],
                                                    side="y")
plot_results(unscaled_predictions,
             x_data[configs['data']['input_sequence_length']:, :])
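
Example #9 restores previously fitted x/y scalers from disk before transforming new data. The restore_scalers implementation is not shown; a common pattern it could wrap, sketched with scikit-learn and joblib (an assumption, not the project's actual code):

import joblib
import numpy as np
from sklearn.preprocessing import MinMaxScaler

x_train = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])  # (samples, features)

# training time: fit and persist the scaler
x_scaler = MinMaxScaler().fit(x_train)
joblib.dump(x_scaler, 'mymodel-x.scaler')

# prediction time: restore it and transform new inputs
x_scaler = joblib.load('mymodel-x.scaler')  # roughly what restore_scalers wraps
print(x_scaler.transform([[2.0, 20.0]]))    # -> [[0.5 0.5]]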
Example #10
def main():

    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()

    # model.build_model(configs)
    model.load_model("saved_models/dow_30_50%.h5")

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # out-of memory generative training
    steps_per_epoch = math.ceil(
        (data.len_train - configs['data']['sequence_length']) /
        configs['training']['batch_size'])

    model.train_generator(data_gen=data.generate_train_batch(
        seq_len=configs['data']['sequence_length'],
        batch_size=configs['training']['batch_size'],
        normalise=configs['data']['normalise']),
                          epochs=configs['training']['epochs'],
                          batch_size=configs['training']['batch_size'],
                          steps_per_epoch=steps_per_epoch,
                          save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    print("x_test.shape")
    print(x_test.shape)

    predictions = model.predict_point_by_point(x_test)

    ########################################################################
    # from sklearn.metrics import mean_squared_error
    # loss_final = mean_squared_error(predictions, y_test)
    # print("Testing Loss = " + str(loss_final))
    ########################################################################

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    print(predictions.shape)
    print(y_test.shape)

    m = pd.DataFrame(predictions)
    n = pd.DataFrame(y_test)

    m.to_csv("predictions.csv")
    n.to_csv("y_test.csv")

    p = 0
    t = 0

    t_1 = 0

    count = 0

    for a in range(len(predictions)):

        if a == 0:
            # seed the previous actual value; no direction to compare yet
            t_1 = y_test[a]
            continue

        p = predictions[a]
        t = y_test[a]

        # a positive product means the prediction moved in the same direction
        # as the actual value, relative to the previous actual
        match = (t - t_1) * (p - t_1)

        if match > 0:
            count += 1

        t_1 = t

    # the first element only seeds t_1, so len(predictions) - 1 comparisons are made
    print("Good prediction rate = " + str(count / (len(predictions) - 1)))

    plot_results(predictions, y_test)
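
The loop above scores a prediction as correct when it moves in the same direction as the actual value relative to the previous actual, i.e. when (t - t_1) and (p - t_1) have the same sign. A tiny worked example:

t_1, t, p = 100.0, 103.0, 101.5   # previous actual, actual, predicted
match = (t - t_1) * (p - t_1)     # 3.0 * 1.5 = 4.5 > 0: both moved up
print(match > 0)                  # True -> counted as a good prediction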
Example #11
def main():
    config_path = 'config/config.json'
    with open(config_path, 'r') as f:
        configs = json.load(f)
        logging.info("Loaded {}".format(config_path))

    logging.info("\n{}\n".format(configs))

    data_path = configs['data']['filename']
    data_dir = os.path.dirname(data_path)
    dtypes = configs['data'].get('dtypes', None)
    windowed_normalization = configs['data']['normalise']

    data = DataLoader(data_path,
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      scaler_path=os.path.join(data_dir, "scaler"),
                      windowed_normalization=windowed_normalization,
                      dtypes=dtypes)

    model = Model()

    if configs['model'].get('load_model'):
        model_path = os.path.join(configs['model']['load_model'])
        logging.info("Loading {}".format(model_path))
        model.load_model(model_path, configs)
        plot_dir = os.path.join(os.path.dirname(model_path), "plots")
        os.makedirs(plot_dir, exist_ok=True)
    else:
        plot_dir = os.path.join(configs['model']['save_dir'], "plots")
        os.makedirs(plot_dir, exist_ok=True)
        model.build_model(configs)
        x, y = data.get_train_data(
            seq_len=configs['data']['sequence_length'],
            windowed_normalization=windowed_normalization)
        '''
        # in-memory training
        model.train(
            x,
            y,
            epochs = configs['training']['epochs'],
            batch_size = configs['training']['batch_size'],
            save_dir = configs['model']['save_dir']
        )
        '''
        # out-of-memory generative training
        steps_per_epoch = math.ceil(
            (data.len_train - configs['data']['sequence_length']) /
            configs['training']['batch_size'])
        model.train_generator(data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            windowed_normalization=windowed_normalization),
                              epochs=configs['training']['epochs'],
                              batch_size=configs['training']['batch_size'],
                              steps_per_epoch=steps_per_epoch,
                              save_dir=configs['model']['save_dir'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        windowed_normalization=windowed_normalization)

    predictions_multiple = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    plot_results_multiple(predictions_multiple,
                          y_test,
                          configs['data']['sequence_length'],
                          out_path=os.path.join(plot_dir, "multiple.png"))
Example #12
def main():
    code_file = open("app/controllers/temp/code.txt", "r")
    # code_file = open("/root/stock/app/controllers/temp/code.txt", "r")
    the_code = code_file.read().splitlines()
    code_file.close()
    the_code = ''.join(the_code)
    the_code = the_code.zfill(6)

    input_file = "app/controllers/temp/history_" + the_code + ".csv"
    output_file = "app/controllers/temp/future/future_" + the_code +".csv"
    #input_file = "/root/stock/app/controllers/temp/data/history/history_" + the_code + ".csv"
    #output_file = "/root/stock/app/controllers/temp/future/future_" + the_code +",csv"


    configs = json.load(open('app/controllers/temp/future/config.json', 'r'))

    data = DataLoader(
        input_file,
        configs['data']['train_test_split'],
        configs['data']['columns']
    )
    #model = Model()
    model_close = Model()
    model_open = Model()
    model_high = Model()
    model_low = Model()

    #model.load_model("app/controllers/temp/future/model.h5")
    model_close.load_model("app/controllers/temp/future/close.h5")
    model_open.load_model("app/controllers/temp/future/open.h5")
    model_high.load_model("app/controllers/temp/future/high.h5")
    model_low.load_model("app/controllers/temp/future/low.h5")
    x_test, y_test, data_raw = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    x_close = x_test[:,:,[0,4]]
    x_open = x_test[:,:,[1,4]]
    x_high = x_test[:,:,[2,4]]
    x_low = x_test[:,:,[3,4]]

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions_close = model_close.predict_sequences_multiple(x_close, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions_open = model_open.predict_sequences_multiple(x_open, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions_high = model_high.predict_sequences_multiple(x_high, configs['data']['sequence_length'], configs['data']['sequence_length'])
    predictions_low = model_low.predict_sequences_multiple(x_low, configs['data']['sequence_length'], configs['data']['sequence_length'])

    predicted = []
#    for i, data in enumerate(predictions):
#        predicted=predicted+data

    predicted_close = []
    for seq in predictions_close:
        predicted_close += seq
    predicted_open = []
    for seq in predictions_open:
        predicted_open += seq
    predicted_high = []
    for seq in predictions_high:
        predicted_high += seq
    predicted_low = []
    for seq in predictions_low:
        predicted_low += seq

    predicted.append(predicted_close)
    predicted.append(predicted_open)
    predicted.append(predicted_high)
    predicted.append(predicted_low)

    data_raw = np.array(data_raw).astype(float)
    denormalised_data = []

    predicted = np.array(predicted).astype(float)

#    for i in range(len(predicted)):
#        denormalised_col = (float(predicted[i])+1)*float(data_raw[i, 0, 0])
#        denormalised_data.append(denormalised_col)

    for i in range(predicted.shape[0]):
        denormalised_window = []
        for j in range(predicted.shape[1]):
            # invert the window normalisation: p = p0 * (n + 1)
            denormalised_col = (float(predicted[i, j]) + 1) * float(data_raw[j, i, 0])
            denormalised_window.append(denormalised_col)
        denormalised_data.append(denormalised_window)
    denormalised_data = np.array(denormalised_data).T
    # print(denormalised_data)

    name = ['close','open','high','low']
    test = pd.DataFrame(columns=name,data=denormalised_data)
    test.to_csv(output_file)
    os.remove(input_file)
Example #13
def main():
    do = 4
    if do == 0:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
        '''
        # out-of memory generative training
        steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        model.train_generator(
            data_gen=data.generate_train_batch(
                seq_len=configs['data']['sequence_length'],
                batch_size=configs['training']['batch_size'],
                normalise=configs['data']['normalise']
            ),
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            steps_per_epoch=steps_per_epoch,
            save_dir=configs['model']['save_dir']
        )
        '''

        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
        # predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        # plot_results(predictions, y_test)
    elif do == -1:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training

        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
    elif do == 1:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\10092020-152418-e31.h5"
        )
        #configs['data']['sequence_length'] = 12
        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

        #predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        # plot_results(predictions, y_test)
    elif do == 2:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\23092020-232350-e31.h5"
        )
        normed_test_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        normed_test_data = normed_test_data.get(configs['data']['columns'])
        normed_test_data = normed_test_data[
            -configs['data']['sequence_length'] + 1:]
        norm_train_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        norm_train_data = norm_train_data.get(configs['data']['columns'])
        norm_train_data = norm_train_data[-configs['data']['sequence_length'] +
                                          1:]
        normed_test_data = DataLoader.normalise_windows2(
            model, window_data=normed_test_data, single_window=True)
        norm_train_data = DataLoader.normalise_windows2(
            model, window_data=norm_train_data, single_window=True)
        print(normed_test_data)
        # NOTE: `data` is never constructed in this branch (do == 2), so a
        # DataLoader would need to be created before this line can run
        test = data.currentData
        model.predict_sequences_multipleSecondAttempt(test, 30)
    else:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']["recentName"]),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models_Test\27122020-171426-e101testv3.h5"
        )
        testdata, printData = data.normData(seq_len=30, normalise=True)

        predictions = model.predict_sequences_multipleSecondAttempt(
            testdata, 29)
        plot_results_multiple(predictions, printData, 29)
Example #14
# dataframe.reset_index(drop=True, inplace=True)
index = dataframe[dataframe['time'] == date].index.values.astype(int)[0]
dataframe = dataframe[index - 49:index + 2]
print("making stock " + stockID + "'s dataframe")

configs = json.load(open('config.json', 'r'))
data = DataLoader(dataframe, int(stockID), 0, configs['data']['columns'])

x_test, y_test = data.get_test_data(seq_len=configs['data']['sequence_length'],
                                    normalise=configs['data']['normalise'])
print("len of test(expected: 1):", len(x_test))

print("loading model...")
model = Model()
model.load_model('saved_models/stock-' + stockID + '.h5')

predictions = model.predict_sequences_multiple(
    x_test, configs['data']['sequence_length'], 5)
# print("predictions: ", predictions)

curr_price = x_test[0][-1][0]
changes = []
for p in predictions[0]:
    # print("change: ", (p+1) / (curr_price+1) -1)
    changes.append((p + 1) / (curr_price + 1) - 1)
if max(changes) >= 0.05:
    print("stockID: " + stockID + ", prediction time: " + date +
          ", change: +5%\n")
elif max(changes) >= 0.03:  # assumed threshold to match the +3% message
    print("stockID: " + stockID + ", prediction time: " + date +
          ", change: +3%\n")