Example #1
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="lstm", help="Model to train.")
    parser.add_argument("--lag", default="12", help="specify number of lags")
    parser.add_argument("--file_name",
                        default="970_1_data.csv",
                        help="Csv file name")
    args = parser.parse_args()

    lag = int(args.lag)
    config = {"batch": 256, "epochs": 600}  # training config
    file = args.file_name
    model_name = os.path.splitext(file)[0]
    X_train, y_train, _, _, _ = process_data(file, lag)

    if args.model == 'lstm':
        # input changed to [3, lag] for the rain data model
        m = model.get_lstm([[3, lag], 64, 64, 1])
        train_model(m, X_train, y_train, "{}_lstm".format(model_name), config)
    if args.model == 'gru':
        # input changed to [3, lag] for the rain data model
        m = model.get_gru([[3, lag], 64, 64, 1])
        train_model(m, X_train, y_train, "{}_gru".format(model_name), config)
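
Both branches delegate the actual fitting to train_model, which is not shown here. A minimal sketch of such a helper for a Keras model, using the batch/epochs config above; the loss, optimizer, validation split and save path are assumptions, not taken from the example:

# Minimal sketch of a train_model helper for a Keras model.
# Loss, optimizer, validation split and the save path are assumptions.
def train_model(m, X_train, y_train, name, config):
    m.compile(loss="mse", optimizer="rmsprop", metrics=["mape"])
    hist = m.fit(X_train, y_train,
                 batch_size=config["batch"],
                 epochs=config["epochs"],
                 validation_split=0.05)
    m.save("model/{}.h5".format(name))  # persist weights and architecture
    return hist
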
Example #2
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="lstm",
                        help="Model to train.")  #model name
    parser.add_argument("--lag", default="12", help="lags")  # number of lags
    parser.add_argument("--file_name",
                        default="970_1_data.csv",
                        help="Csv file name")  # file name of the csv
    args = parser.parse_args()

    lag = int(args.lag)
    config = {"batch": 256, "epochs": 600}
    file = args.file_name
    X_train, y_train, _, _, _ = process_data(file, lag)

    if args.model == 'lstm':  # training lstm model
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    if args.model == 'gru':  # training gru model
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    if args.model == 'saes':  # training saes model
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
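
All three branches rely on process_data(file, lag) to produce lag-length input windows and their targets. A minimal sketch of what such a helper could look like, assuming a single CSV column scaled to [0, 1] with MinMaxScaler; the column name, the 80/20 split and the shuffling are assumptions:

# Minimal sketch of the assumed process_data helper: build `lag`-length windows
# from one CSV column scaled to [0, 1]. Column name and split are assumptions.
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def process_data(file, lag):
    series = pd.read_csv(file)["flow"].values.reshape(-1, 1)  # hypothetical column
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(series)
    flow = scaler.transform(series).reshape(-1)

    windows = np.array([flow[i - lag:i + 1] for i in range(lag, len(flow))])
    split = int(0.8 * len(windows))
    train, test = windows[:split], windows[split:]
    np.random.shuffle(train)

    X_train, y_train = train[:, :-1], train[:, -1]
    X_test, y_test = test[:, :-1], test[:, -1]
    return X_train, y_train, X_test, y_test, scaler
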
Example #3
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--lag", default="12", help="lags")
    parser.add_argument("--model", default="970_1_data.csv", help="csv file name for the model")   
    args = parser.parse_args()
    lag = int(args.lag)
    model_name = args.model
    model_name = os.path.splitext(model_name)[0] # getting model name

    # load all the models
    lstm = load_model('model/lstm.h5')
    gru = load_model('model/gru.h5')
    saes = load_model('model/saes.h5')
    knn = pickle.load(open('model/knn.model', 'rb'))
    xgboost = xgb.Booster({'nthread': 4})  # init model
    xgboost.load_model('model/xgb.model')
    models = [lstm, gru, saes, knn, xgboost]
    names = ['LSTM', 'GRU', 'SAEs', 'KNN', 'Xgboost']

    lag = 12
    file = '970_1_data.csv'
    _, _, X_test, y_test, scaler = process_data(file, lag)
    print(y_test.shape)
    y_test = scaler.inverse_transform(y_test.reshape(-1, 1)).reshape(1, -1)[0]
    print(y_test.shape)

    y_preds = []
    for name, model in zip(names, models):
        _, _, X_test, _, _ = process_data(file, lag)
        if name == 'SAEs':
            X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1]))
        elif name == 'GRU' or name == 'LSTM':
            X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        elif name == 'Xgboost':
            X_test = xgb.DMatrix(X_test)
        # file = 'images/' + name + '.png'
        # plot_model(model, to_file=file, show_shapes=True)
        predicted = model.predict(X_test)
        predicted = scaler.inverse_transform(predicted.reshape(-1, 1)).reshape(1, -1)[0]
        y_preds.append(predicted[:384]) # append all the prediction results
        print(name)
        eva_regress(y_test, predicted)
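
eva_regress(y_test, predicted) is assumed to print standard regression metrics for each model. A minimal sketch using sklearn.metrics; the exact metric set is an assumption:

# Minimal sketch of the assumed eva_regress helper; the metric set is an assumption.
import numpy as np
from sklearn import metrics

def eva_regress(y_true, y_pred):
    mask = y_true != 0  # avoid division by zero in MAPE
    mape = np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100
    print("MAPE: {:.2f}%".format(mape))
    print("MAE:  {:.4f}".format(metrics.mean_absolute_error(y_true, y_pred)))
    print("RMSE: {:.4f}".format(np.sqrt(metrics.mean_squared_error(y_true, y_pred))))
    print("R2:   {:.4f}".format(metrics.r2_score(y_true, y_pred)))
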
Example #4
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("--lag", default="12", help="lags")
    parser.add_argument("--model", default="970_1_data.csv", help="lags")   
    args = parser.parse_args()
    lag = int(args.lag)
    model_name = args.model
    model_name = os.path.splitext(model_name)[0]

    lstm = load_model('model/{}_lstm.h5'.format(model_name))
    # gru = load_model('model/{}_gru.h5'.format(args.model))
    # saes = load_model('model/{}_saes.h5'.format(args.model))
    models = [lstm]
    names = ['LSTM']

    lag = 12
    file = '{}.csv'.format(model_name)
    _, _, X_test, y_test, scaler = process_data(file, lag)
    minimum = scaler.data_min_
    maximum = scaler.data_max_
    scale = scaler.scale_
    scaled = maximum * (scale * X_test[0] - minimum * scale)
    unscaled = (scaled + (minimum * scale)) / scale
    y_test = scaler.inverse_transform(y_test.reshape(-1, 1)).reshape(1, -1)[0]
    X_test_2 = scaler.inverse_transform(X_test.reshape(-1, 1)).reshape(1, -1)[0]
    print(X_test[0].shape)

    with open('scaler_data/{}.txt'.format(model_name), 'w') as output:
        output.write('{},{},{}'.format(scale[0],minimum[0],maximum[0]))

    # print(y_test.shape)

    y_preds = []
    for name, model in zip(names, models):
        # if name == 'SAEs':
        #     # X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1]))
        # else:
        #     X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        file = 'images/' + name + '.png'
        plot_model(model, to_file=file, show_shapes=True)
        predicted = model.predict(X_test)
        predicted = scaler.inverse_transform(predicted.reshape(-1, 1)).reshape(1, -1)[0]
        y_preds.append(predicted[:384])
        print(name)
        eva_regress(y_test, predicted)

    plot_results(y_test[:384], y_preds, names)
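
The scale/min/max triple written to scaler_data/ makes it possible to undo the scaling later without unpickling the MinMaxScaler. A minimal sketch of reading that file back and inverting the transform by hand, assuming the default feature_range=(0, 1):

# Minimal sketch: re-read the saved scaler parameters and invert the
# MinMaxScaler transform by hand; assumes feature_range=(0, 1).
def load_scaler_params(path):
    with open(path) as f:
        scale, data_min, data_max = (float(v) for v in f.read().split(","))
    return scale, data_min, data_max

def unscale(scaled_values, scale, data_min):
    # forward transform: scaled = (x - data_min) * scale
    return scaled_values / scale + data_min

scale, data_min, data_max = load_scaler_params("scaler_data/970_1_data.txt")
print(unscale(0.5, scale, data_min))
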
Example #5
def main():

    lstm = []
    y_preds = []
    for idx in range(4, 13):  # models trained with lag values 4 to 12
        lstm.append("lstm_{}.h5".format(idx))
        model = load_model('model/{}'.format(lstm[idx - 4]))

        file = '970_1_data.csv'
        _, _, X_test, y_test, scaler = process_data(file, idx)
        y_test = scaler.inverse_transform(y_test.reshape(-1, 1)).reshape(
            1, -1)[0]  # rescale targets from [0, 1] back to the original values

        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
        predicted = model.predict(X_test)
        predicted = scaler.inverse_transform(predicted.reshape(-1, 1)).reshape(
            1, -1)[0]
        y_preds.append(
            predicted[:384])  # collect each lag model's predictions
        print(lstm[idx - 4])
        eva_regress(y_test, predicted)

    plot_results(y_test[:384], y_preds, lstm)
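
plot_results is assumed to overlay every model's predictions against the ground-truth series. A minimal matplotlib sketch; the figure size, labels and styling are assumptions:

# Minimal sketch of the assumed plot_results helper; styling is an assumption.
import matplotlib.pyplot as plt

def plot_results(y_true, y_preds, names):
    plt.figure(figsize=(12, 5))
    plt.plot(y_true, label="True data", color="black")
    for y_pred, name in zip(y_preds, names):
        plt.plot(y_pred, label=name)
    plt.xlabel("Time step")
    plt.ylabel("Value")
    plt.legend()
    plt.show()
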