Example #1
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataProcessor(os.path.join('data', configs['data']['filename']),
                         configs['data']['train_test_split'],
                         configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=".")

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions_pointbypoint = model.predict_point_by_point(x_test)
    plot_results(predictions_pointbypoint, y_test)

    predictions_fullseq = model.predict_sequence_full(
        x_test, configs['data']['sequence_length'])
    plot_results(predictions_fullseq, y_test)
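All of these examples read a config.json with the same overall shape. A minimal file matching the keys the scripts access might look like the following (an illustrative sketch; the filename, column names and hyperparameter values are placeholders, not taken from any of the repos):

{
    "data": {
        "filename": "sp500.csv",
        "columns": ["Close", "Volume"],
        "sequence_length": 50,
        "train_test_split": 0.85,
        "normalise": true
    },
    "training": {
        "epochs": 2,
        "batch_size": 32
    },
    "model": {
        "save_dir": "saved_models"
    }
}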
Example #2
def main(train_after=False):
    config_file = 'web_flask/LSTM/config.json'
    configs = json.load(open(config_file, 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(configs['data']['filename'],
                      configs['data']['train_test_split'],
                      configs['data']['columns'],
                      normalise_meth=configs['data']['normalise'])

    model = Model()
    if train_after:
        model.load_model(os.path.join(configs['model']['save_dir'],
                                      configs['model']['model_name']))
    else:
        model.build_model(configs)
    history = LossHistory()

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'],
                history=history,
                x_test=x_test,
                y_test=y_test)
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )

    '''

    history.loss_plot('epoch')
    #loss, accuracy = model.model.evaluate(x_test, y_test)
    #print(loss,accuracy)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x[0])  # or: x_test

    #plot_results_multiple(predictions, y, configs['data']['sequence_length'])
    plot_results(predictions, y)
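LossHistory is referenced above but not defined in the snippet. A minimal Keras callback with the loss_plot method used here might look like this (a sketch under the assumption that it simply records the per-epoch training loss):

import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback


class LossHistory(Callback):
    # record the training loss at the end of every epoch
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))

    def loss_plot(self, loss_type='epoch'):
        # plot the recorded losses against the epoch index
        plt.plot(range(len(self.losses)), self.losses, label='train loss')
        plt.xlabel(loss_type)
        plt.ylabel('loss')
        plt.legend()
        plt.show()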
Example #3
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    if not configs['training']['train']:
        model.load_model(filepath='saved_models/02102019-164727-e2.h5')
    else:
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
    # out-of-memory generative training
    # steps_per_epoch = math.ceil(
    #     (data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'],
    #                                                configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

    predictions = model.predict_point_by_point(x_test)
    plot_results(predictions, y_test)
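plot_results and plot_results_multiple are helper functions that none of the snippets define. Minimal matplotlib versions consistent with how they are called might be (a sketch, not the original implementations):

import matplotlib.pyplot as plt


def plot_results(predicted_data, true_data):
    # overlay the predicted series on the true series
    plt.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()


def plot_results_multiple(predicted_data, true_data, prediction_len):
    # pad each predicted sub-sequence so it lines up with its window in the true series
    plt.plot(true_data, label='True Data')
    for i, data in enumerate(predicted_data):
        padding = [None for _ in range(i * prediction_len)]
        plt.plot(padding + list(data), label='Prediction')
    plt.legend()
    plt.show()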
Example #4
def main_sin():
    config = json.load(open("config_sin.json", 'r'))
    data = DataLoader(os.path.join('data', config['data']['filename']),
                      config['data']['train_test_split'],
                      config['data']['columns'])
    x_train, y_train = data.get_train_data(config['data']['sequence_length'],
                                           config['data']['normalise'])
    x_test, y_test = data.get_test_data()
    model = Model()
    model.build_model(config)
    model.train(x_train, y_train, config['training']['epochs'],
                config['training']['batch_size'])
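The DataLoader used across these examples slices a price series into overlapping windows, with the last step of each window as the target. A simplified sketch of the windowing behind get_train_data (make_windows is a hypothetical name for illustration; it assumes, as is common in these repos, that normalise scales each window by its first row, p / p0 - 1):

import numpy as np


def make_windows(series, seq_len, normalise=True):
    # series: 2-D array of shape (time steps, features)
    windows = []
    for i in range(len(series) - seq_len + 1):
        window = series[i:i + seq_len]
        if normalise:
            window = window / window[0] - 1  # scale relative to the window's first row
        windows.append(window)
    windows = np.array(windows)
    x = windows[:, :-1]       # every step but the last is the input
    y = windows[:, -1, [0]]   # the last step of the first column is the target
    return x, y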
Example #5
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    # Load the model from a previously saved file; in that case no further training is needed, i.e. the model.train() step below can be skipped
    # model.load_model(r'saved_models/15102019-155115-e2.h5')

    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    # print('shape of x: {0}'.format(x.shape))  # (3942, 49, 2)
    # print('shape of y: {0}'.format(y.shape))  # (3942, 1)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    '''
    # out-of-memory generative training
    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    model.train_generator(
        data_gen=data.generate_train_batch(
            seq_len=configs['data']['sequence_length'],
            batch_size=configs['training']['batch_size'],
            normalise=configs['data']['normalise']
        ),
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        steps_per_epoch=steps_per_epoch,
        save_dir=configs['model']['save_dir']
    )
    '''

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #6
def train_network(configs, dataloader):

    # build model
    model = Model(configs['data']['input_mode'],
                  configs['data']['output_mode'])
    model.build_model(configs['model'])

    # in-memory training
    out_seq_len = (configs['data']['input_sequence_length']
                   if configs['data']['output_mode'] == "many_to_many" else 1)
    x_train, y_train = dataloader.get_train_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    x_test, y_test = dataloader.get_test_data(
        in_seq_len=configs['data']['input_sequence_length'],
        out_seq_len=out_seq_len)

    history = model.train(
        x_train,
        y_train,
        x_test,
        y_test,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        shuffle=configs['model']['shuffle_training_data'],
        allow_early_stop=configs['training']['allow_early_stop'],
    )

    return model, history
Example #7
def main(choice):
    # 'configs' is assumed to be loaded at module level, e.g. configs = json.load(open('config.json', 'r'))
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    model = Model()
    model.build_model(configs)
    if choice != 'info':
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'])

        # out-of-memory generative training
        # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        # model.train_generator(
        #     data_gen = data.generate_train_batch(
        #         seq_len = configs['data']['sequence_length'],
        #         batch_size = configs['training']['batch_size'],
        #         normalise = configs['data']['normalise']
        #     ),
        #     epochs = configs['training']['epochs'],
        #     batch_size = configs['training']['batch_size'],
        #     steps_per_epoch = steps_per_epoch
        # )

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])

        if (choice == "multi"):
            predictions = model.predict_sequences_multiple(
                x_test, configs['data']['sequence_length'],
                configs['data']['sequence_length'])
            plot_results_multiple(predictions, y_test,
                                  configs['data']['sequence_length'])
        elif (choice == "seq"):
            predictions = model.predict_sequence_full(
                x_test, configs['data']['sequence_length'])
            plot_results(predictions, y_test)
        else:
            predictions = model.predict_point_by_point(x_test)
            plot_results(predictions, y_test)
Example #8
def main():
    configs = json.load(open('config.json', 'r'))

    data = DataLoader(configs['data']['filename'],
                      os.path.join('data', configs['data']['filepath']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'])

    # out-of-memory generative training
    '''
	steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
	model.train_generator(
		data_gen = data.generate_train_batch(
			seq_len = configs['data']['sequence_length'],
			batch_size = configs['training']['batch_size'],
			normalise = configs['data']['normalise']
		),
		epochs = configs['training']['epochs'],
		batch_size = configs['training']['batch_size'],
		steps_per_epoch = steps_per_epoch
	)
	'''

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predictions = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    #predictions = model.predict_point_by_point(x_test)
    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
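Several examples keep a commented-out "out-of-memory" path that trains from DataLoader.generate_train_batch instead of materialising all windows at once. A sketch of the generator idea (hypothetical; it assumes a _next_window helper that returns one normalised (x, y) window, mirroring the in-memory windowing above):

import numpy as np


def generate_train_batch(self, seq_len, batch_size, normalise):
    # yield (x, y) batches endlessly so a steps_per_epoch-driven fit can draw from it
    i = 0
    while True:
        x_batch, y_batch = [], []
        for _ in range(batch_size):
            if i >= self.len_train - seq_len:
                i = 0  # wrap around at the end of the training split
            x, y = self._next_window(i, seq_len, normalise)
            x_batch.append(x)
            y_batch.append(y)
            i += 1
        yield np.array(x_batch), np.array(y_batch)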
Example #9
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    my_model = model.build_model(configs)

    plot_model(my_model, to_file='output/model.png', show_shapes=True)
    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    x, y = data.get_train_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    print(x.shape)
    print(y.shape)

    print(configs['training']['batch_size'])
    print(configs['model']['save_dir'])
    model.train(x,
                y,
                configs['training']['epochs'],
                configs['training']['batch_size'],
                configs['model']['save_dir']
                )

    x_test, y_test = data.get_test_data(
        configs['data']['sequence_length'],
        configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    prediction_point = model.predict_point_by_point(x_test)

    # print(prediction_point)
    # print(np.array(predictions).shape)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(prediction_point, y_test)
Example #10
def main():
    # load parameters
    configs = json.load(open('./data/config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data',configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns'],

    )
    # create the RNN model
    model = Model()
    model.build_model(configs)

    # load training data
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(
        x,
        y,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        save_dir=configs['model']['save_dir']
    )

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
    )

    # results visualization
    predictions_multiseq = model.predict_sequences_multiple(
        x_test, configs['data']['sequence_length'],
        configs['data']['sequence_length'])
    predictions_pointbypoint = model.predict_point_by_point(x_test)

    plot_results_multiple(predictions_multiseq, y_test,
                          configs['data']['sequence_length'])
    plot_results(predictions_pointbypoint, y_test)
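predict_sequences_multiple, called in several examples, predicts prediction_len steps at a time, feeding each prediction back into the input window. A reconstruction of that loop (a sketch assuming a Keras model stored on self.model):

import numpy as np


def predict_sequences_multiple(self, data, window_size, prediction_len):
    # for every prediction_len-th test window, predict prediction_len steps ahead,
    # shifting the window and inserting each new prediction as we go
    prediction_seqs = []
    for i in range(int(len(data) / prediction_len)):
        curr_frame = data[i * prediction_len]
        predicted = []
        for _ in range(prediction_len):
            predicted.append(self.model.predict(curr_frame[np.newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs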
Example #11
def main():
    # load the required parameters
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    # load the data
    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])
    # build the RNN model
    model = Model()
    mymodel = model.build_model(configs)

    plot_model(mymodel, to_file='model.png', show_shapes=True)

    # load the training data
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])
    print(x.shape)
    print(y.shape)

    # train the model
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # test results
    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # visualize the test results
    predictions = model.predict_sequences_multiple(
        x_test,
        configs['data']['sequence_length'],
        configs['data']['sequence_length'],
        debug=False)
    print(np.array(predictions).shape)

    plot_results_multiple(predictions, y_test,
                          configs['data']['sequence_length'])
Example #12
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
    )

    model = Model()
    model.build_model(configs)

    # get train data
    x, y = data.get_train_data()

    #x=x.squeeze()
    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])
    #    # out-of-memory generative training
    #    steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    #    model.train_generator(
    #        data_gen=data.generate_train_batch(
    #            batch_size=configs['training']['batch_size'],
    #        ),
    #        epochs=configs['training']['epochs'],
    #        batch_size=configs['training']['batch_size'],
    #        steps_per_epoch=steps_per_epoch,
    #        save_dir=configs['model']['save_dir']
    #    )

    # testing model
    x_test, y_test = data.get_test_data()
    #x_test=x_test.squeeze()

    predictions = model.predict_point_by_point(x_test)

    #   plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)
Example #13
def main():
    model_config = json.load(open("config.json", "r"))["model"]
    dataset_path = 'data/'
    # Requirement: Check the presence of the dataset
    if check_dataset(dataset_path):
        configs = json.load(open('config.json', 'r'))
        # 1) Build the model
        model = Model(model_config=model_config)
        model.build_model(configs)
        batch_size, epochs = 4, 30
        cols = configs['training']['cols']
        sequence_length = configs['data']['sequence_length']
        save_dir = "model"
        file_count = 0
        dataset_files = glob.glob("{}/*.txt".format(dataset_path))
        # 2) Loop over the files in the dataset folder
        for filename in dataset_files:
            print("Training {}/{} - {}".format(file_count, len(dataset_files),
                                               filename))
            file_count += 1
            # 3) Divide the dataset in parts and loop over them
            chunksize = 10**4
            for chunk in pd.read_csv(filename, chunksize=chunksize):
                # 4) Get and prepare data
                data = DataModel()
                x = data.get_train_data(
                    data=[x for x in chunk.get(cols).values.tolist()],
                    seq_len=sequence_length)
                X_train, X_test, y_train, y_test = train_test_split(
                    data.dataX, data.dataY, test_size=0.33)
                print(y_train.shape)
                # 5) Train the model
                model.train(X_train,
                            X_test,
                            y_train,
                            y_test,
                            epochs=epochs,
                            batch_size=batch_size,
                            save_dir=save_dir)
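check_dataset is not shown in this example. A minimal sketch, under the assumption that it only verifies that the dataset folder exists and contains at least one .txt file (the pattern the training loop globs for):

import glob
import os


def check_dataset(path):
    # the training loop globs for *.txt files, so require at least one
    return os.path.isdir(path) and len(glob.glob(os.path.join(path, "*.txt"))) > 0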
Example #14
def main():
    configs = json.load(open('configcrops.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'])

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # Yogyakarta: Kulon Progo, Bantul, Gunung Kidul, Sleman, DIY
    # Jawa Barat: Bandung, Tasikmalaya, Majalengka, Cirebon, Kuningan, Garut, Sumedang, Cianjur, Subang, Purwakarta, Indramayu,
    # Ciamis, Sukabumi, Bogor, Bekasi, Karawang

    # # out-of-memory generative training
    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    # # save_dir = configs['model']['save_dir']

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    # print(x_test)
    # print(y_test)

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])

    predictions_point = model.predict_point_by_point(x_test)
    print(len(predictions_point))
    plot_results(predictions_point, y_test)

    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    # predictions_full = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    # plot_results(predictions_full, y_test)

    groundtrue = data._groundtruths(1)
    groundtrue = (groundtrue.ravel())
    print(len(groundtrue))

    RMSElist = []
    for i in range(len(groundtrue)):
        errorrate = groundtrue[i] - predictions_point[i]
        hasilkuadrat = errorrate * errorrate
        RMSElist.append(hasilkuadrat)
    RMSE = sum(RMSElist) / (len(predictions_point) - 2)
    RMSE = RMSE**(1 / 2)
    print(RMSE)

    getdataforecast = data._forecasting(5, 1)

    total_prediksi = 5
    takefrom = 5
    forecast_result = model.forecast(total_prediksi, getdataforecast, takefrom)
    # print(forecast_result[0])
    # forecast_result=np.append(forecast_result,[0.0])
    # print(forecast_result)

    n_steps = 8
    # split into samples
    X, y = split_sequence(forecast_result, n_steps)
    # reshape from [samples, timesteps] into [samples, timesteps, features]
    n_features = 1
    # print(X)
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    # define model
    model = Sequential()
    model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
    model.add(Dense(1))
    model.compile(optimizer='adam', loss='mse')
    # fit model
    model.fit(X, y, epochs=200, verbose=0)

    # demonstrate prediction
    for j in range(total_prediksi):
        getxlastnumber = array(forecast_result[(-n_steps - 1):-1])
        x_input = getxlastnumber
        # print(x_input)

        x_input = x_input.reshape((1, n_steps, n_features))
        yhat = model.predict(x_input, verbose=0)
        # print(yhat[0][0])

        forecast_result = np.append(forecast_result, yhat[0])
        # prediction_point=np.append(prediction_point,yhat[0])

    plot_results_onlypredicted(forecast_result)
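split_sequence is used above but never defined. The usual univariate version, consistent with the reshape to (samples, n_steps, 1) that follows it, might be (a sketch):

import numpy as np


def split_sequence(sequence, n_steps):
    # split a 1-D sequence into (n_steps inputs -> 1 output) supervised samples
    X, y = [], []
    for i in range(len(sequence) - n_steps):
        X.append(sequence[i:i + n_steps])
        y.append(sequence[i + n_steps])
    return np.array(X), np.array(y)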
Example #15
def main():
    do = 4
    if do == 0:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
        '''
        # out-of-memory generative training
        steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        model.train_generator(
            data_gen=data.generate_train_batch(
                seq_len=configs['data']['sequence_length'],
                batch_size=configs['training']['batch_size'],
                normalise=configs['data']['normalise']
            ),
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            steps_per_epoch=steps_per_epoch,
            save_dir=configs['model']['save_dir']
        )
        '''

        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
        # predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        # plot_results(predictions, y_test)
    elif do == -1:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        # in-memory training

        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'],
                    X_test=x_test,
                    Y_test=y_test,
                    saveName=str(configs['training']['epochs']))
    elif do == 1:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']['filename']),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\10092020-152418-e31.h5"
        )
        #configs['data']['sequence_length'] = 12
        predictions = model.predict_sequences_multiple(
            x_test, configs['data']['sequence_length'],
            configs['data']['sequence_length'])
        # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])

        #predictions = model.predict_point_by_point(x_test)

        plot_results_multiple(predictions, y_test,
                              configs['data']['sequence_length'])
        # plot_results(predictions, y_test)
    elif do == 2:
        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models\23092020-232350-e31.h5"
        )
        normed_test_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        normed_test_data = normed_test_data.get(configs['data']['columns'])
        normed_test_data = normed_test_data[
            -configs['data']['sequence_length'] + 1:]
        norm_train_data = pd.read_csv(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\data\2hdatarecent.csv"
        )
        norm_train_data = norm_train_data.get(configs['data']['columns'])
        norm_train_data = norm_train_data[-configs['data']['sequence_length'] +
                                          1:]
        normed_test_data = DataLoader.normalise_windows2(
            model, window_data=normed_test_data, single_window=True)
        norm_train_data = DataLoader.normalise_windows2(
            model, window_data=norm_train_data, single_window=True)
        print(normed_test_data)
        test = data.currentData  # note: 'data' is never constructed in this branch
        model.predict_sequences_multipleSecondAttempt(test, 30)
    else:

        configs = json.load(open('config.json', 'r'))
        if not os.path.exists(configs['model']['save_dir']):
            os.makedirs(configs['model']['save_dir'])
        model = Model()
        model.build_model(configs)

        data = DataLoader(os.path.join('data', configs['data']["recentName"]),
                          configs['data']['train_test_split'],
                          configs['data']['columns'])
        model.load_model(
            r"C:\Users\chris\Documents\Bitcoin_Prediction\saved_models_Test\27122020-171426-e101testv3.h5"
        )
        testdata, printData = data.normData(seq_len=30, normalise=True)

        predictions = model.predict_sequences_multipleSecondAttempt(
            testdata, 29)
        plot_results_multiple(predictions, printData, 29)
Example #16
def main():

    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    model = Model()
    model.build_model(configs)

    # get live sensor data from the Arduino and predict the next readings
    sensor_port = serial.Serial('COM7', 9600)
    sensor_port.close()
    sensor_port.open()
    seq_len = configs['data']['sequence_length']
    sensor_data = []
    predictions_data = []
    live_data = np.arange(seq_len - 1)

    plt.ion()  #real time graph

    while True:
        i = 0
        while i < seq_len - 1:  # store incoming data in the test array
            b = sensor_port.readline()  # read a byte string
            live_data[i] = float(b.decode())
            sensor_data.append(live_data[i])
            i += 1
        sensor_struct_data = live_data[
            np.newaxis, :, np.newaxis]  # construct live data for the LSTM
        predictions = model.predict_sequence_live(
            sensor_struct_data, configs['data']['sequence_length']
        )  #Shift the window by 1 new prediction each time, re-run predictions on new window
        predictions_data.append(predictions)

        plot_results(predictions_data[-120:], sensor_data[-100:])
        plt.show()
        plt.pause(0.1)  # required to keep the live plot refreshing

        # predict every seq_len samples
        #if len(sensor_data) > 1 * seq_len:

        # retrain every 10 * seq_len samples
        if len(sensor_data) > 10 * seq_len:
            np.savetxt('data/sensor.csv',
                       sensor_data,
                       delimiter=',',
                       header='sensor_value')

            #load data for training
            data = DataLoader(
                os.path.join('data', configs['data']['filename']),
                configs['data']['train_test_split'],
                configs['data']['columns'])

            x, y = data.get_train_data(
                seq_len=configs['data']['sequence_length'],
                normalise=configs['data']['normalise'])
            # in-memory training
            model.train(x,
                        y,
                        epochs=configs['training']['epochs'],
                        batch_size=configs['training']['batch_size'],
                        save_dir=configs['model']['save_dir'])
            sensor_data = sensor_data[-100:]
Example #17
def main():
    configs = json.load(open('config.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(os.path.join('data', configs['data']['filename']),
                      configs['data']['train_test_split'],
                      configs['data']['columns'])

    lossesMINE = []
    lossesKERAS = []
    # for day_prediction in [1, 2, 3, 4, 5, 10, 50]:
    day_prediction = 10
    print("Predicting %i days..." % day_prediction)

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                               normalise=configs['data']['normalise'],
                               day_pred=day_prediction)

    # in-memory training
    model.train(x,
                y,
                epochs=configs['training']['epochs'],
                batch_size=configs['training']['batch_size'],
                save_dir=configs['model']['save_dir'])

    # out-of-memory generative training

    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise'],
    #         day_pred=day_prediction
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'],
        day_pred=day_prediction)

    # print(x_test.shape)
    # print(len(data.denormalization_vals))
    # print(y_test.shape)

    #predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    #predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)
    #
    # y_test_unormalized = np.zeros((y_test.shape[0], ))
    # prediction_unormalized = []
    #
    # for i in range(4):
    #     for j in range(int(configs['data']['sequence_length']) - 10):
    #         y_test_unormalized[j*(i+1)] = (y_test[j] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0]
    #         prediction_unormalized.append((predictions[j*(i+1)] + 1)*data.data_test[i*int(configs['data']['sequence_length']), 0])

    npPredictions = np.asarray(predictions)
    # print(type(npPredictions))
    # print(type(y_test))
    # print(npPredictions.shape)
    # print(y_test.shape)
    loss = 0
    for i in range(len(npPredictions)):
        loss += (npPredictions[i] - y_test[i])**2
    print(loss)
    keras_loss = model.model.evaluate(x_test, y_test)
    print(keras_loss)

    lossesMINE.append(loss)
    lossesKERAS.append(keras_loss)

    #plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])

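    # undo the per-window normalisation: real = (normalised + 1) * p0
    # (assuming data.denormalization_vals holds the base value p0 of each test window)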
    real_y = np.reshape(y_test, (y_test.shape[0], )) * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    real_pred = predictions * np.asarray(
        data.denormalization_vals) + np.asarray(data.denormalization_vals)
    # print(real_y.shape)
    # print(real_pred.shape)
    data.denormalization_vals = []

    #plot_results(predictions, y_test)

    plot_results(real_pred, real_y)

    print(lossesMINE)
    print(lossesKERAS)
Example #18
class Classifier(object):

    def __init__(self, dataset):
        # self.imagegen = ImageDataGenerator(shear_range=0.2,zoom_range=0.2,horizontal_flip=True)
        self.dataset = dataset
        self.train_x, self.train_y = dataset[0]
        self.test_x, self.test_y = dataset[1]
        self.learning_rate = 0.16
        self.eps = 2e-9
        self.params = {}
        self.model = Model()

    def error(self, Y_actual, Y_output):
        Y = Y_actual - Y_output
        print(Y)
        error = (1/Y.shape[0])*np.sum(Y*Y.T)
        return error

    def compute_gradients(self, cost, parameters):
        grads = {}
        grads["dW1"] = T.grad(cost,parameters["W1"])
        grads["db1"] = T.grad(cost,parameters["b1"])
        grads["dW2"] = T.grad(cost,parameters["W2"])
        grads["db2"] = T.grad(cost,parameters["b2"])
        grads["dW3"] = T.grad(cost,parameters["W3"])
        grads["db3"] = T.grad(cost,parameters["b3"])
        grads["dW5"] = T.grad(cost,parameters["W5"])
        grads["dW6"] = T.grad(cost,parameters["W6"])

        return grads

    def train(self):
        parameters = None
        num_epoch = 1
        while num_epoch > 0:
            # mini_batch_iter = self.imagegen.flow(self.train_x,self.train_y,batch_size=16)

            train_batch_inputs = np.array_split(self.train_x, 16)
            train_batch_labels = np.array_split(self.train_y, 16)
            train_batch_list = zip(train_batch_inputs, train_batch_labels)
            current_data = 0  # batches processed so far (was never initialised)
            for train_batch in train_batch_list:
                # sh = self.model.shape_dim(train_batch[0],train_batch[1])
                cost = self.model.train(train_batch[0], train_batch[1])
                if current_data % 100 == 0:
                    print(str(current_data * 16) + " samples trained")
                if current_data == len(self.train_x):
                    break
                current_data += 1
            # error = self.model.test(self.test_x.astype(theano.config.floatX),self.test_y)
            # print("Epoch "+str(2-num_epoch)+" done")
            # print("Error: "+str(error))
            num_epoch -= 1
        self.params = self.model.parameters()
        saveModel(self.params)

    def test(self, parameters = None):
        error = self.model.test(self.test_x.astype(theano.config.floatX), self.test_y)
        return error
        # error = self.error(self.test_y,Y_predict)
        # return error

    def cost_function(self, y_predict, y_label):
        # binary cross-entropy; the leading minus sign makes it a cost to minimise
        return -(y_label*np.log(y_predict) + (1-y_label)*np.log(1-y_predict))
Example #19
def main(args):

    config_params = read_params(args.config_fpath)

    if args.gpu < 0:
        cuda = False
    else:
        cuda = True
        torch.cuda.set_device(args.gpu)

    print('*** Create data loader ***')
    dataloader, val_dataloader, test_dataloader = make_data_loader(
        args.batch_size, dataset_name='Letter-low', cuda=cuda)

    print('*** Create model ***')
    model = Model(config=config_params, verbose=True, cuda=cuda)
    if cuda:
        model.cuda()

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    # loss function
    loss_fcn = torch.nn.CrossEntropyLoss()

    # Start training
    print('*** Start training ***')
    step = 0
    model.train()
    losses = []
    for epoch in range(args.n_epochs):
        for graphs, labels in dataloader:

            # forward pass
            logits = model(graphs)

            # compute loss
            loss = loss_fcn(logits, labels)
            losses.append(loss.item())

            # backpropagate
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # testing
            step += 1
            if step % args.eval_every == 0:
                val_loss, val_acc = test(val_dataloader, model, loss_fcn)
                print(
                    "Step {:05d} | Train loss {:.4f} | Over {} | Val loss {:.4f} | "
                    "Val acc {:.4f}".format(
                        step,
                        np.mean(losses),
                        len(losses),
                        val_loss,
                        val_acc,
                    ))
                model.train()

    print('*** Start Testing ***')
    test_loss, test_acc = test(test_dataloader, model, loss_fcn)
    print("Test loss {:.4f} | Test acc {:.4f}".format(test_loss, test_acc))
Example #20
import json
import os

import numpy as np

from core.data_processor import DataLoader  # assumed module path
from core.model import Model

configs = json.load(open('config.json', 'r'))

data = DataLoader(os.path.join('data', configs['data']['filename']),
                  configs['data']['train_test_split'],
                  configs['data']['columns'])

model = Model()
model.build_model(configs)
x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                           normalise=configs['data']['normalise'])

model.train(x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir'])

x_test, y_test = data.get_test_data(seq_len=configs['data']['sequence_length'],
                                    normalise=configs['data']['normalise'])


def predict_point_by_point(self, data):
    # Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
    predicted = self.model.predict(data)
    predicted = np.reshape(predicted, (predicted.size, ))
    return predicted


def predict_sequence_full(self, data, window_size):
    # Shift the window by 1 new prediction each time, re-running the prediction on the new window.
    # (The body is truncated in the original source; this is a plausible reconstruction.)
    curr_frame = data[0]
    predicted = []
    for i in range(len(data)):
        predicted.append(self.model.predict(curr_frame[np.newaxis, :, :])[0, 0])
        curr_frame = curr_frame[1:]
        curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
    return predicted
Example #21
def main():
    configs = json.load(open('point_to_point_similar_sinewave.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    data = DataLoader(
        os.path.join('data', configs['data']['filename']),
        configs['data']['train_test_split'],
        configs['data']['columns']
    )

    model = Model()
    model.build_model(configs)
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )


    # in-memory training
    model.train(
        x,
        y,
        epochs=configs['training']['epochs'],
        batch_size=configs['training']['batch_size'],
        save_dir=configs['model']['save_dir']
    )

    # out-of-memory generative training
    # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
    # model.train_generator(
    #     data_gen=data.generate_train_batch(
    #         seq_len=configs['data']['sequence_length'],
    #         batch_size=configs['training']['batch_size'],
    #         normalise=configs['data']['normalise']
    #     ),
    #     epochs=configs['training']['epochs'],
    #     batch_size=configs['training']['batch_size'],
    #     steps_per_epoch=steps_per_epoch,
    #     save_dir=configs['model']['save_dir']
    # )

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])
    # predictions = model.predict_sequence_full(x_test, configs['data']['sequence_length'])
    predictions = model.predict_point_by_point(x_test)

    predictions = predictions.reshape(-1, 1)
    normalized_test = data.get_normalized_test()
    normalized_test = np.delete(normalized_test, [j for j in range(len(predictions), len(normalized_test))], axis=0)
    my_normalized_test = np.delete(normalized_test, [0], axis=1)
    # my_normalized_test = np.delete(my_normalized_test, [j for j in range(len(predictions), len(my_normalized_test))], axis=0)

    final_data = np.hstack((predictions, my_normalized_test))
    actual_predictions = data.inverse_data(final_data)
    predictions = actual_predictions[:, 0]
    actual_test = data.inverse_data(normalized_test)
    y_test = actual_test[:, 0]
    # plot_results_multiple(predictions, y_test, configs['data']['sequence_length'])
    plot_results(predictions, y_test)

    mse = mean_squared_error(y_test, predictions)
    print("Mean Squared Error: " + str(mse))

    print("Root Mean Squared Error: " + str(math.sqrt(mse)))

    mae = mean_absolute_error(y_test, predictions)
    print("Mean Absolute Error: " + str(mae))

    r2 = r2_score(y_test, predictions)
    print("R Squared Error: " + str(r2))
Example #22
from matplotlib import pyplot as plt
from core.model import Model
from core.data_processor import DataProcessor

# data preprocessing
dp = DataProcessor('./data/AAPL_080112_200112.csv', (85, 0, 15),
                   ['Adj Close', 'Volume'])
data_x, data_y = dp.get_data(50, 'train')
test_x, test_y = dp.get_data(50, 'test')

# train the model
model = Model()
model.build()
model.train(data_x, data_y, epochs=10, batch_size=32, save_dir='./saved_model')

# predict
predicted = model.predict_sequence_multiple(test_x,
                                            window_size=50,
                                            prediction_len=50)
plt.plot(test_y, label='True Data')
for i, data in enumerate(predicted):
    padding = [None for p in range(i * 50)]
    plt.plot(padding + data, label='Prediction')
plt.legend()
plt.show()
Example #23
import numpy as np
from core.model import Model
from layers.input import Input
from layers.dense import Dense
from util.cost_functions import L2

if __name__ == '__main__':
    # demo MLP
    data_x = np.array([1, 2])
    data_y = np.array([0.2, 0.4])

    train_x = np.reshape(data_x, (len(data_x), 1, 1))
    train_y = np.reshape(data_y, (len(data_y), 1, 1))

    model = Model()
    model.add(Input(1))
    model.add(Dense(3))
    model.add(Dense(1))
    model.compile(cost=L2(),
                  optimizer='sgd',
                  num_epochs=30000,
                  batch_size=1,
                  lr=0.1)
    model.train(train_x, train_y)

    test_data_x = np.array([1])
    test_x = np.reshape(test_data_x, (len(test_data_x), 1))
    print(model.predict(test_x))
Example #24
def main():
    configs = json.load(open('configcrops.json', 'r'))
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])

    #filename1 can be changed into filename (see the configcrops.json)
    namaefile = configs['data']['filename1']

    with open(namaefile, 'r') as dataframe:
        hasil = json.load(dataframe)
    # print(hasil)

    temp = []
    listhasil = []
    for key, value in hasil.items():
        temp = [key, value]
        listhasil.append(temp)

    listkota = [
        'Kulon Progo', 'Bantul', 'Gunung Kidul', 'Sleman', 'DIY', 'Bandung',
        'Tasikmalaya', 'Majalengka', 'Cirebon', 'Kuningan', 'Garut',
        'Sumedang', 'Cianjur', 'Subang', 'Purwakarta', 'Indramayu', 'Ciamis',
        'Sukabumi', 'Bogor', 'Bekasi', 'Karawang'
    ]
    kodekota = [
        'KLP', 'BTL', 'GKD', 'SLM', 'DIY', 'BD', 'TKM', 'MJK', 'CRB', 'KNG',
        'GRT', 'SMD', 'CJR', 'SBG', 'PWK', 'IDY', 'CMS', 'SKB', 'BGR', 'BKS',
        'KRW'
    ]
    listtahun = []
    for i in range(1961, 2015):
        listtahun.append(str(i))

    data = []
    #listhasil = [["Kulon Progo", {year: crop, ...}], ["DIY", {year: crop, ...}], ...]
    datacrops = []
    datalengkapcrops = []
    datalengkaptahun = []
    datatahun_semuadaerah = []

    # variables holding the columns for the CSV export
    kota_untuk_csv = []  # city column
    kode_untuk_csv = []  # city-code column
    tahun_untuk_csv = []  # year column
    crops_untuk_csv = []  # crops column
    RMSE_untuk_csv = []

    semua_data_csv = [
    ]  # holds city, code, year, crops and RMSE per row (unused; kept as a fallback)

    # loop over each city
    for j in range(len(listkota)):
        if (len(listhasil[j][1]) != len(listtahun)):
            jlhprediksi = len(listtahun) - len(
                hasil[listkota[j]]) + (6) - 1  # forecast through 2020
        else:
            jlhprediksi = (6) - 1  # forecast through 2020

        datatahun_crops = listhasil[j][1]  # JSON mapping of year -> crop value
        datatahun = list(
            datatahun_crops.keys())  # the years recorded for one region
        datatahunint = [int(x) for x in datatahun]  # convert to integers

        arraytahun = np.array(datatahunint)  # as a numpy array
        sorttahun = np.sort(arraytahun)  # sorted
        datatahun_daerah = list(sorttahun)  # back to a list
        datalengkaptahun.append(datatahun_daerah)

        for n in range(len(
                listhasil[j][1])):  # listhasil[j][1] holds the year/crop data
            datacrops.append(float(listhasil[j][1][str(datatahun_daerah[n])]))
        datalengkapcrops.append(datacrops)
        datacrops = []

    # print(datalengkapcrops)
    # print(datalengkaptahun)
    listcrops_daerah = []
    hasillistcrops_daerah = []

    for i in range(len(listkota)):
        for j in range(len(datalengkapcrops[i])):
            listcrops_daerah.append([datalengkapcrops[i][j]
                                     ])  # one crop value per year, each in its own list
        hasillistcrops_daerah.append(listcrops_daerah)
        listcrops_daerah = []

    # for i in range(len(listkota)):
    #     arraycrops_semua=np.array(hasillistcrops_daerah[i])
    # print(arraycrops_semua[0:20])

    arraycrops_semua = np.array(hasillistcrops_daerah)

    for i in range(len(listkota)):
        data = DataLoader(np.array(arraycrops_semua[i]),
                          configs['data']['train_test_split'])

        model = Model()
        model.build_model(configs)
        x, y = data.get_train_data(seq_len=configs['data']['sequence_length'],
                                   normalise=configs['data']['normalise'])

        # in-memory training
        model.train(x,
                    y,
                    epochs=configs['training']['epochs'],
                    batch_size=configs['training']['batch_size'],
                    save_dir=configs['model']['save_dir'])

        # Yogyakarta: Kulon Progo, Bantul, Gunung Kidul, Sleman, DIY
        # Jawa Barat: Bandung, Tasikmalaya, Majalengka, Cirebon, Kuningan, Garut, Sumedang, Cianjur, Subang, Purwakarta, Indramayu,
        # Ciamis, Sukabumi, Bogor, Bekasi, Karawang

        # # out-of-memory generative training
        # steps_per_epoch = math.ceil((data.len_train - configs['data']['sequence_length']) / configs['training']['batch_size'])
        # model.train_generator(
        #     data_gen=data.generate_train_batch(
        #         seq_len=configs['data']['sequence_length'],
        #         batch_size=configs['training']['batch_size'],
        #         normalise=configs['data']['normalise']
        #     ),
        #     epochs=configs['training']['epochs'],
        #     batch_size=configs['training']['batch_size'],
        #     steps_per_epoch=steps_per_epoch,
        #     save_dir=configs['model']['save_dir']
        # )

        # # save_dir = configs['model']['save_dir']

        x_test, y_test = data.get_test_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise'])

        # print(x_test)
        # print(y_test)

        # predictions = model.predict_sequences_multiple(x_test, configs['data']['sequence_length'], configs['data']['sequence_length'])

        predictions_point = model.predict_point_by_point(x_test)
        print(len(predictions_point))

        for ulang in range(len(datalengkaptahun[i]) - len(predictions_point)):
            datalengkaptahun[i].remove(
                datalengkaptahun[i]
                [ulang])  # so the ground-truth and prediction counts match

        # Use the plot when you want to see the data graphically
        # plot_results(predictions_point, y_test,datalengkaptahun[i],listkota[i])

        groundtrue = data._groundtruths(1)
        groundtrue = (groundtrue.ravel())
        # print(len(groundtrue))

        #Measure the RMSE
        RMSElist = []
        for k in range(len(predictions_point)):
            errorrate = groundtrue[k + ulang] - predictions_point[k]
            hasilkuadrat = errorrate * errorrate
            RMSElist.append(hasilkuadrat)
        RMSE = sum(RMSElist) / (len(predictions_point))
        RMSE = RMSE**(1 / 2)
        # print(RMSE)

        getdataforecast = data._forecasting(jlhprediksi, jlhprediksi)
        # print(len(getdataforecast))

        total_prediksi = jlhprediksi
        takefrom = jlhprediksi
        forecast_result = model.forecast(total_prediksi, getdataforecast,
                                         takefrom)
        # print(len(forecast_result))
        # print(forecast_result[0])
        # forecast_result=np.append(forecast_result,[0.0])
        # print(forecast_result)

        n_steps = 8
        # split into samples
        X, y = split_sequence(forecast_result, n_steps)
        # reshape from [samples, timesteps] into [samples, timesteps, features]
        n_features = 1
        # print(X)
        X = X.reshape((X.shape[0], X.shape[1], n_features))
        # define model
        model = Sequential()
        model.add(
            LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mse')
        # fit model
        model.fit(X, y, epochs=200, verbose=0)

        # make the number of predictions equal to the number of ground-truth points
        hasilprediksi = []
        hasilprediksi.append(groundtrue[-(ulang + 1):-ulang])
        hasilprediksi.append(groundtrue[-ulang:])

        for j in range(total_prediksi):
            getxlastnumber = array(forecast_result[(-n_steps - 1):-1])
            x_input = getxlastnumber
            # print(x_input)

            x_input = x_input.reshape((1, n_steps, n_features))
            yhat = model.predict(x_input, verbose=0)
            # print(yhat[0][0])

            hasilprediksi.append(yhat[0])  # to be written out as JSON
            forecast_result = np.append(forecast_result,
                                        yhat[0])  # feeds the forecast training window
            groundtrue = np.append(groundtrue,
                                   yhat[0])  # for plotting

            # print(len(groundtrue))
            # prediction_point=np.append(prediction_point,yhat[0])
        # print(hasilprediksi)      # hasilprediksi is an array; hasilprediksi[0] is a list, hasilprediksi[0][0] a scalar

        semuatahun = datalengkaptahun[i]
        tahunbaru = []
        terakhirtahun = datalengkaptahun[i][len(datalengkaptahun[i]) - 1]

        rangetahun_input = len(groundtrue) - len(datalengkaptahun[i])
        # print(rangetahun_input)

        if (len(datalengkaptahun[i]) < len(groundtrue)):
            for z in range(rangetahun_input):
                semuatahun.append(terakhirtahun)  # for the plot
                tahunbaru.append(terakhirtahun)  # to be written out as JSON
                terakhirtahun = terakhirtahun + 1
        # print(tahunbaru)

        # Use the plot when you want to see the data graphically
        # plot_results_onlypredicted(semuatahun,groundtrue,listkota[i])

        # check that the ground-truth length equals datalengkaptahun[i], the number of years recorded for this Entity
        # print(len(groundtrue))
        # print(len(datalengkaptahun[i]))

        # semuahasil_csv=[]
        # csv_data_kota=duplikathasil.get(column).values[:]

        # record all data into lists for the CSV export
        for jlh in range(len(semuatahun)):
            kota_untuk_csv.append(listkota[i])
            kode_untuk_csv.append(kodekota[i])
            RMSE_untuk_csv.append(RMSE[0])
            tahun_untuk_csv.append(semuatahun[jlh])
            crops_untuk_csv.append(groundtrue[jlh])

        #Alternative solution for csv
        # for jlh in range(len(datalengkapcrops[i])):
        #     kota_untuk_csv.append(listkota[i])
        #     kode_untuk_csv.append(kodekota[i])
        #     # tahun_untuk_csv.append(datalengkaptahun[i][jlh])
        #     # crops_untuk_csv.append(datalengkapcrops[i][jlh])
        #     RMSE_untuk_csv.append(RMSE[0])

        # for jlh in range (rangetahun_input):
        #     kota_untuk_csv.append(listkota[i])
        #     kode_untuk_csv.append(kodekota[i])
        #     # tahun_untuk_csv.append(tahunbaru[jlh])
        #     # crops_untuk_csv.append(hasilprediksi[jlh][0])
        #     RMSE_untuk_csv.append(RMSE[0])

        HasilCSV = {
            'Entity': kota_untuk_csv,
            'Code': kode_untuk_csv,
            'Year': tahun_untuk_csv,
            ' crop(tonnes per hectare)': crops_untuk_csv,
            'RMSE': RMSE_untuk_csv
        }
        df = DataFrame(HasilCSV,
                       columns=[
                           'Entity', 'Code', 'Year',
                           ' crop(tonnes per hectare)', 'RMSE'
                       ])
        print(df)

        filebaca_csv = configs["data"]["newcsv"]
        filebaca_csv1 = configs["data"]["newcsv1"]

        # the export filename can be changed through configcrops.json (filebaca_csv
        # and filebaca_csv1 above are read only for illustration)
        df.to_csv(
            r'/home/biovick/Downloads/tkte/sudiro/Forecasting-and-Predicting Crops into Visualization/data/newTomatov2.csv',
            index=False)