def main(argv):
    """Train the model selected via --model on the bundled train/test CSVs.

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="lstm",
        help="Model to train.")
    args = parser.parse_args()

    lag = 12  # number of past time steps fed to the model
    config = {"batch": 256, "epochs": 600}
    file1 = 'data/train.csv'
    file2 = 'data/test.csv'
    X_train, y_train, _, _, _ = process_data(file1, file2, lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
def main(argv):
    """Train the model selected via --model on the after-6am comparison data.

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="ann", help="Model to train.")
    args = parser.parse_args()

    lag = 6  # number of past time steps fed to the model
    config = {"batch": 256, "epochs": 200}
    file1 = 'data/compareData/train_data_compare(after6am).csv'
    file2 = 'data/compareData/test_data_cluster0_compare(after6am).csv'
    # test split and scaler are not used in this training entry point
    X_train, y_train, _, _, _ = process_data(file1, file2, lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 100, 100, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'ann':
        # dense network takes flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_ann([lag, 20, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 100, 100, 100, 1])
        train_seas(m, X_train, y_train, args.model, config)
def main(argv):
    """Train the model selected via --model on the MLflow-dvc dataset.

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="saes",
        help="Model to train.")
    args = parser.parse_args()

    lag = 12  # number of past time steps fed to the model
    config = {"batch": 256, "epochs": 50}
    file1 = '/home/VICOMTECH/icejudo/PROYECTOS/MiRepoGithub/MLflow-dvc-data/data/train.csv'
    file2 = '/home/VICOMTECH/icejudo/PROYECTOS/MiRepoGithub/MLflow-dvc-data/data/test.csv'
    X_train, y_train, _, _, _ = process_data(file1, file2, lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
def main(argv):
    """Train the model selected via --model (lstm/gru/saes/bilstm).

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="lstm",
        help="Model to train.")
    args = parser.parse_args()

    lag = 12  # number of past time steps fed to the model (was hard-coded 4x)
    config = {"batch": 64, "epochs": 150}
    X_train, y_train, _, _, _ = data_process(lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
    elif args.model == 'bilstm':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_bilstm([lag, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
def main(argv):
    """Train the model selected on the command line on a single CSV series.

    Command-line flags:
        --model      model name: 'lstm', 'gru' or 'saes' (default 'lstm')
        --lag        number of past time steps used as input (default "12")
        --file_name  CSV file holding the series (default '970_1_data.csv')

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="lstm", help="Model to train.")  # model name
    parser.add_argument("--lag", default="12", help="lags")  # number of lags
    parser.add_argument("--file_name", default="970_1_data.csv", help="Csv file name")  # csv file name
    args = parser.parse_args()

    lag = int(args.lag)  # --lag arrives as a string
    config = {"batch": 256, "epochs": 600}
    file = args.file_name
    X_train, y_train, _, _, _ = process_data(file, lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
def main(argv):
    """Train the model selected via --model on data/train1.csv / data/test1.csv.

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="bidirectional",
        help="Model to train.")
    args = parser.parse_args()

    lag = 4  # how far to look back
    config = {"batch": 256, "epochs": 15}
    file1 = './data/train1.csv'
    file2 = './data/test1.csv'
    X_train, y_train, _, _, _ = process_data(file1, file2, lag)

    if args.model == 'lstm':
        # recurrent models need 3-D input: (samples, timesteps, features)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_lstm([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'bidirectional':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_bidirectional([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'simplernn':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_simplernn([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'gru':
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
        m = model.get_gru([lag, 64, 64, 1])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, timesteps)
        X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1]))
        m = model.get_saes([lag, 400, 400, 400, 1])
        train_seas(m, X_train, y_train, args.model, config)
def generate_new_model(model_to_train, input_shape):
    """Build a fresh, untrained model of the requested kind.

    Args:
        model_to_train: one of 'seas', 'lstm', 'gru', 'feedfwd', 'deepfeedfwd'.
        input_shape: input shape forwarded unchanged to the model factory.

    Returns:
        The newly constructed model.

    Raises:
        ValueError: if ``model_to_train`` is not a known model name.
            (The original fell through with ``m`` unbound and crashed with
            UnboundLocalError on ``return m``.)
    """
    if model_to_train == 'seas':
        return model.get_saes(input_shape, [400, 400, 400, 1])
    if model_to_train == 'lstm':
        return model.get_lstm(input_shape, [64, 64, 1])
    if model_to_train == 'gru':
        return model.get_gru(input_shape, [64, 64, 1])
    if model_to_train == "feedfwd":
        return model.get_feed_fwd(input_shape, [64, 1])
    if model_to_train == "deepfeedfwd":
        return model.get_deep_feed_fwd(input_shape, [64, 128, 64, 1])
    raise ValueError(f"unknown model to train: {model_to_train!r}")
def main(target_model):
    """Train the model named by ``target_model`` on the data/train|test dirs.

    Args:
        target_model: 'lstm', 'gru' or 'saes'; any other value is a no-op.
    """
    config = {"batch": 10, "epochs": 25000}
    train = 'data/train/'
    test = 'data/test/'
    X_train, y_train, _, _ = process_data(train, test)

    if target_model == 'lstm':
        # 58: input feature size; 2: output size — assumed to match
        # process_data's output; TODO confirm against data pipeline
        m = model.get_lstm([58, 64, 64, 2])
        train_model(m, X_train, y_train, target_model, config)
    elif target_model == 'gru':
        m = model.get_gru([58, 64, 64, 2])
        train_model(m, X_train, y_train, target_model, config)
    elif target_model == 'saes':
        # stacked autoencoders take flat 2-D input: (samples, features)
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        m = model.get_saes([4800, 400, 400, 400, 2])
        train_seas(m, X_train, y_train, target_model, config)
def main(argv):
    """Train the model selected via --model on the M30f dataset.

    Supports 'lstm', 'gru', 'saes', 'merge_lstm' and 'cnn'; each family uses
    its own data loader and input layout.

    NOTE(review): ``argv`` is ignored — ``parse_args()`` reads ``sys.argv``
    directly; kept as-is so existing callers are unaffected.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='lstm', help='Model to train')
    args = parser.parse_args()

    lag = 12  # number of past time steps fed to the model
    config = {"batch": 256, "epochs": 500}
    path = r'D:\data\2018_data\process_data\M30f.csv'

    if args.model == 'merge_lstm':
        # three lag windows (minute, day, week) loaded by a dedicated helper
        lag = [12, 96, 672]
        X_train, y_train, _, _, _ = min_day_week_data(path, lag)
        m = model.merge_lstm([12, 24, 7, 1])
        train_merge_lstm(m, X_train, y_train, args.model, config)
    elif args.model == 'cnn':
        lag = 96
        X_train, y_train, _, _, _ = cnn_data(path, lag)
        # CNN expects 4-D input: (samples, height, width, channels)
        X_train = np.reshape(
            X_train,
            [X_train.shape[0], X_train.shape[1], X_train.shape[2], 1])
        m = model.get_cnn([68, 96, 16, 32, 64])
        train_model(m, X_train, y_train, args.model, config)
    elif args.model in ['lstm', 'gru', 'saes']:
        X_train, y_train, _, _, _ = process_data(path, lag)
        if args.model == 'saes':
            # stacked autoencoders take flat 2-D input: (samples, timesteps)
            X_train = np.reshape(X_train, [X_train.shape[0], X_train.shape[1]])
            m = model.get_saes([lag, 400, 400, 400, 1])
            train_seas(m, X_train, y_train, args.model, config)
        else:
            # recurrent models need 3-D input: (samples, timesteps, features)
            X_train = np.reshape(X_train, [X_train.shape[0], X_train.shape[1], 1])
            if args.model == 'lstm':
                m = model.get_lstm([lag, 64, 128, 1])
            else:
                m = model.get_gru([lag, 64, 128, 1])
            train_model(m, X_train, y_train, args.model, config)