def main(args): mode = args.mode # mode = "test" codes = ["600036"] # codes = ["600036", "601998"] # codes = args.codes # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # train_steps = args.train_steps train_steps = 30000 # training_data_ratio = 0.98 training_data_ratio = args.training_data_ratio env = Market(codes, start_date="2008-01-01", end_date="2018-01-01", **{ "market": market, "use_sequence": True, "scaler": MinMaxScaler, "mix_index_state": True, "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm(tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "hidden_size": 5, "enable_saver": True, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot()
def main(args): env = Market(args.codes, **{"use_sequence": True}) algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": args.mode, # "mode": "test", "save_path": os.path.join(CHECKPOINTS_DIR, "SL", "DualAttnRNN", "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", "DualAttnRNN", "summary"), "hidden_size": 5, "enable_saver": True, "enable_summary_writer": True }) algorithm.run() algorithm.eval_and_plot()
def main(args):
    env = Market(args.codes)
    algorithm = Algorithm(
        tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{
            "mode": args.mode,
            # "mode": "test",
            "episodes": args.episode,
            "save_path": os.path.join(CHECKPOINTS_DIR, "RL", "DuelingDQN", "model"),
            "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", "DuelingDQN", "summary"),
            "enable_saver": True,
            "enable_summary_writer": True
        })
    algorithm.run()
    algorithm.eval()
    algorithm.plot()
def main(args): mode = args.mode # mode = 'test' codes = ["SH_index_all"] # codes = ["600036", "601998"] # codes = args.codes # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # market = 'future' # train_steps = args.train_steps train_steps = 30000 # training_data_ratio = 0.98 training_data_ratio = args.training_data_ratio env = Market(codes, start_date="2008-01-02", end_date="2019-03-18", **{ "market": market, "use_sequence": True, "seq_length": 3, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "hidden_size": 64, "layer_size": 64, "enable_saver": True, "keep_prob": 0.8, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot_backtest(code=codes[0], model_name=model_name)
def main(args): mode = args.mode # mode = 'test' # codes = args.codes codes = ["600036"] # codes = ["AU88", "RB88", "CU88", "AL88"] # codes = ["T9999"] market = args.market # market = 'future' # episode = args.episode episode = 200 # training_data_ratio = 0.5 training_data_ratio = args.training_data_ratio model_name = os.path.basename(__file__).split('.')[0] env = Market( codes, start_date="2008-01-01", end_date="2019-07-19", **{ "market": market, # "use_sequence": True, # "mix_index_state": True, "logger": generate_market_logger(model_name), "training_data_ratio": training_data_ratio, }) algorithm = Algorithm( tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{ "mode": mode, "episodes": episode, "enable_saver": True, "learning_rate": 0.003, "enable_summary_writer": True, "logger": generate_algorithm_logger(model_name), "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "summary"), }) algorithm.run() algorithm.eval() algorithm.plot()
def market_setup_sl(args, strategy): codes = ["SH_index"] market = args.market return Market(codes, start_date="2006-10-09", end_date="2019-02-27", **{ "pre_process_strategy": strategy, "market": market, "use_sequence": True, "seq_length": 10, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": 0.8, })
def main(args): mode = args.mode # mode = "test" codes = ["SH_index"] market = args.market # train_steps = args.train_steps # train_steps = 5000 train_steps = 1000 # training_data_ratio = 0.98 training_data_ratio = args.training_data_ratio env = Market(codes, start_date="2006-10-09", end_date="2019-03-18", **{ "market": market, "use_sequence": True, "seq_length": 20, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] print(os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model")) print(os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary")) algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "hidden_size": 12, "layer_size": 2, "enable_saver": False, "train_steps": train_steps, "enable_summary_writer": False, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot_backtest(code=codes[0], model_name=model_name)
def test_nasdaq(self): mode = self.args.mode # mode = "test" codes = ["nasdaq"] market = self.args.market # train_steps = args.train_steps # train_steps = 5000 train_steps = 30000 # training_data_ratio = 0.98 training_data_ratio = self.args.training_data_ratio env = Market(codes, start_date="2008-01-02", end_date="2019-02-01", **{ "market": market, "use_sequence": True, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "hidden_size": 48, "learning_rate": 0.0001, "enable_saver": True, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot_nasdaq_backtest()
def main(args):
    mode = args.mode
    # mode = 'test'
    codes = args.codes
    # codes = ["600036"]
    # codes = ["eos_usdt"]
    # codes = ["600036", "601998"]
    # codes = ["AU88", "RB88", "CU88", "AL88"]
    # codes = ["T9999"]
    market = 'k15m'
    # market = args.market
    # market = 'future'
    # episode = args.episode
    episode = 1000
    training_data_ratio = 0.95
    # training_data_ratio = args.training_data_ratio
    # pdb.set_trace()

    model_name = os.path.basename(__file__).split('.')[0]

    # env = Market(codes, start_date="2018-06-04", end_date="2018-06-12", **{
    env = Market(codes, start_date=args.start, end_date=args.end, **{
        "market": market,
        "mix_index_state": False,
        "logger": generate_market_logger(model_name),
        "training_data_ratio": training_data_ratio,
    })

    algorithm = Algorithm(tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{
        "mode": mode,
        "episodes": episode,
        "enable_saver": True,
        "learning_rate": 0.003,
        "enable_summary_writer": True,
        "logger": generate_algorithm_logger(model_name),
        "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "model"),
        "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "summary"),
    })

    algorithm.run()
    algorithm.eval()
    algorithm.plot()
def main(args): # mode = args.mode mode = 'train' # codes = ["600036"] # codes = ["601398", "000651", "601998", "000002"] codes = ["600036", "601328", "601998", "601398"] # codes = args.codes # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # 调用args_parser.py中的model_launcher_parser对象中的参数“stock”,指定为股票市场 # market = 'future' # train_steps = args.train_steps train_steps = 20000 # 训练次数 training_data_ratio = 0.8 # training_data_ratio = args.training_data_ratio # 初始化股票市场 env = Market(codes, start_date="2008-01-01", end_date="2019-07-19", # 可选参数可接受如下字典参数 **{ "market": market, "use_sequence": True, "scaler": MinMaxScaler, "mix_index_state": False, "training_data_ratio": training_data_ratio,} ) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm(tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "hidden_size": 5, "enable_saver": True, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot()
def main(args): # mode = args.mode mode = "train" # codes = args.codes # codes = ["601398"] codes = ["600036", "601328", "601998", "601398"] # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # default="stock" train_steps = args.train_steps # default=100000 training_data_ratio = 0.8 # training_data_ratio = args.training_data_ratio # env为股票市场market, market市场实例化,**可选参数传入 env = Market(codes, start_date="2008-01-01", end_date="2019-07-19", **{ "market": market,## default="stock" "use_sequence": True, "scaler": MinMaxScaler, # sklearn提供的缩放器 "mix_index_state": False,# 表明要混合上证指数,以结合市场趋势做更宏观的预测 "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] # 返回“TreNet,即文件名 # 算法初始化,这里是TreNet实例化, 传入一系列**可选参数 algorithm = Algorithm(tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode,# test "hidden_size": 5, # 应该是5层hidden layer "enable_saver": True, "train_steps": train_steps,# default=100000 "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot()
def init(context):
    context.s1 = '600036.XSHG'
    update_universe(context.s1)
    context.has_save_data = False

    mode = 'run'
    market = 'stock'
    training_data_ratio = 0.9
    train_steps = 30000

    base = config.get('base')
    codes = ['600036']

    env = Market(codes, start_date=base.get('start_date'), end_date=base.get('end_date'), **{
        "market": market,
        "use_sequence": True,
        "scaler": MinMaxScaler,
        "mix_index_state": True,
        "training_data_ratio": training_data_ratio
    })

    model_name = 'DualAttnRNN'  # os.path.basename(__file__).split('.')[0]

    context.bar_list_origin = []
    context.bar_list = []
    context.scale = MinMaxScaler()
    context.algorithm = Algorithm(
        tf.Session(config=alg_config), env, env.seq_length, env.data_dim, env.code_count, **{
            "mode": mode,
            "hidden_size": 5,
            "enable_saver": True,
            "train_steps": train_steps,
            "enable_summary_writer": True,
            "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"),
            "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"),
        }
    )
def generator(code, start_date, end_date, market="stock", mode='trade'): training_data_ratio = 0.8 episode = 500 env = Market( code, start_date=start_date, end_date=end_date, **{ "market": market, # "use_sequence": True, "logger": generate_market_logger(model_name), "training_data_ratio": training_data_ratio, }) return Algorithm( tf.Session(config=config), env, env.trader.action_space, env.data_dim, **{ "mode": mode, "episodes": episode, "enable_saver": True, "learning_rate": 0.003, "enable_summary_writer": True, "logger": generate_algorithm_logger(model_name), "save_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "RL", model_name, market, "summary"), })
def main(args): mode = args.mode # mode = "test" codes = ["SH_index_all"] # codes = ["600036", "601998"] # codes = args.codes # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # train_steps = args.train_steps # train_steps = 5000 train_steps = 30000 # training_data_ratio = 0.98 training_data_ratio = args.training_data_ratio env = Market(codes, start_date="2001-01-03", end_date="2019-03-08", **{ "market": market, "use_sequence": True, "seq_length": 5, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": 0.8, }) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, env.code_count, **{ "mode": mode, "layer_size": 2, "hidden_size": 48, # "keep_prob": 0.98, # drop out size = 1 - keep_prob "enable_saver": True, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() # algorithm.eval_and_plot_backtest_single(code=codes[0], # model_name=model_name, # MyStrategy=MyStrategy, # DataFeed=PandasDeepLearning, # plot=True) algorithm.eval_and_plot_backtest(code=codes[0], model_name=model_name)
def main(args): mode = args.mode # mode = "test" codes = ["SH_index_all"] # codes = ["600036", "601998"] # codes = args.codes # codes = ["AU88", "RB88", "CU88", "AL88"] market = args.market # train_steps = args.train_steps # train_steps = 5000 train_steps = 10000 # training_data_ratio = 0.98 training_data_ratio = args.training_data_ratio col_order = [ 'open', 'high', 'low', 'close', 'volume', 'stoch_14_3_0_3_0_0', 'stoch_14_3_0_3_0_1', 'trend_backward|close|10_5_10', 'rsi|close|14', 'roc|close|20', 'tp_score', 'on', '6m', 'nasvolume', 'nasclose', 'trend_backward|nasclose|10_5_10', 'trend|close|10_5_10' ] ''' Index(['open', 'high', 'low', 'close', 'volume', 'on', '6m', 'nasclose', 'nasvolume', 'tp_score', 'stoch_14_3_0_3_0_0', 'stoch_14_3_0_3_0_1', 'trend_backward|close|10_5_3', 'rsi|close|14', 'roc|close|20', 'trend_backward|on|10_5_2', 'trend_backward|6m|10_5_2', 'trend_backward|nasclose|10_5_2', 'trend|close|10_5_20'], dtype='object') ''' env = Market(codes, start_date="2008-01-01", end_date="2019-03-08", col_order=col_order, **{ "market": market, "use_sequence": True, "seq_length": 5, "scaler": MinMaxScaler(feature_range=(0, 1)), "mix_index_state": True, "training_data_ratio": training_data_ratio, }) model_name = os.path.basename(__file__).split('.')[0] algorithm = Algorithm( tf.Session(config=config), env, env.seq_length, env.data_dim, [5, 6, 2, 3], env.code_count, **{ "mode": mode, "layer_size": 1, "hidden_size": 16, "keep_prob": 1, # drop out size = 1 - keep_prob "enable_saver": True, "train_steps": train_steps, "enable_summary_writer": True, "save_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "model"), "summary_path": os.path.join(CHECKPOINTS_DIR, "SL", model_name, market, "summary"), }) algorithm.run() algorithm.eval_and_plot_backtest(code=codes[0], model_name=model_name)