# Entry point: parse CLI arguments with TraderArgs and dispatch to
# optimize / train / test / opt-train-test.
import warnings

from lib.RLTrader import RLTrader
from lib.TraderArgs import TraderArgs
from lib.util.logger import init_logger

# np.warnings was never public numpy API (and is gone in recent releases);
# the stdlib warnings module is the same object it re-exported.
warnings.filterwarnings('ignore')

option_parser = TraderArgs()
args = option_parser.get_args()

if __name__ == '__main__':
    logger = init_logger(__name__, show_debug=args.debug)

    trader = RLTrader(**vars(args), logger=logger)

    if args.command == 'optimize':
        trader.optimize(n_trials=args.trials, n_parallel_jobs=args.parallel_jobs)
    elif args.command == 'train':
        trader.train(n_epochs=args.epochs)
    elif args.command == 'test':
        trader.test(model_epoch=args.model_epoch, should_render=args.no_render)
    elif args.command == 'opt-train-test':
        trader.optimize(args.trials, args.parallel_jobs)
        trader.train(n_epochs=args.train_epochs,
                     test_trained_model=args.no_test,
                     render_trained_model=args.no_render)
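# For context, a minimal argparse sketch of the subcommand layout the script
# above dispatches on. TraderArgs is the repo's real parser; the flag names
# shown here (--trials, --parallel-jobs, --epochs, --model-epoch, --no-render)
# are assumptions inferred from the attributes used above, not its confirmed
# interface.
import argparse


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(description='RLTrader CLI (sketch)')
    parser.add_argument('--debug', action='store_true')

    subparsers = parser.add_subparsers(dest='command')

    optimize = subparsers.add_parser('optimize')
    optimize.add_argument('--trials', type=int, default=10)
    optimize.add_argument('--parallel-jobs', type=int, default=1)

    train = subparsers.add_parser('train')
    train.add_argument('--epochs', type=int, default=10)

    test = subparsers.add_parser('test')
    test.add_argument('--model-epoch', type=int, default=0)
    # store_false: rendering defaults to on, --no-render turns it off,
    # matching should_render=args.no_render above.
    test.add_argument('--no-render', action='store_false')

    return parser

# e.g. parsed = build_parser().parse_args(['optimize', '--trials', '50'])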
# Parallel hyperparameter optimization: run one optimize() worker per CPU
# core, then train a final model in the parent process.
import os
import warnings
from multiprocessing import Pool

warnings.filterwarnings('ignore')  # stdlib module; np.warnings was just a re-export


def optimize_code(params):
    # Import inside the worker so each process constructs its own RLTrader
    # (and any framework state) after the fork.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.optimize()


if __name__ == '__main__':
    n_processes = os.cpu_count()
    params = {'n_envs': n_processes}

    # Pool.map blocks until every worker has finished optimizing.
    opt_pool = Pool(processes=n_processes)
    opt_pool.map(optimize_code, [params for _ in range(n_processes)])

    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.train(test_trained_model=True, render_trained_model=True)
# CLI entry point with a selectable reward strategy. The import paths below
# are assumptions matching the attributes this script uses; check them
# against the repo's lib/ layout.
from lib.cli.RLTraderCLI import RLTraderCLI
from lib.util.logger import init_logger
from lib.cli.functions import download_data_async
from lib.env.reward import IncrementalProfit, WeightedUnrealizedProfit

trader_cli = RLTraderCLI()
args = trader_cli.get_args()

# Map the --reward-strat CLI choice to its strategy class.
rewards = {
    "incremental-profit": IncrementalProfit,
    "weighted-unrealized-profit": WeightedUnrealizedProfit,
}
reward_strategy = rewards[args.reward_strat]

if __name__ == '__main__':
    logger = init_logger(__name__, show_debug=args.debug)

    from lib.RLTrader import RLTrader

    trader = RLTrader(**vars(args), logger=logger, reward_strategy=reward_strategy)

    if args.command == 'train':
        trader.train(n_epochs=args.epochs,
                     save_every=args.save_every,
                     test_trained_model=args.test_trained,
                     render_test_env=args.render_test,
                     render_report=args.render_report,
                     save_report=args.save_report)
    elif args.command == 'test':
        trader.test(model_epoch=args.model_epoch,
                    render_env=args.render_env,
                    render_report=args.render_report,
                    save_report=args.save_report)
    elif args.command == 'update-static-data':
        download_data_async()
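# A hedged sketch of how a third strategy could plug into the `rewards`
# mapping above. The real strategies subclass a base in lib/env/reward whose
# exact abstract interface is not shown here; SimpleProfit, its method names,
# and the get_reward signature below are all hypothetical.
from typing import List


class SimpleProfit:
    """Hypothetical strategy: reward the change in net worth each step."""

    def reset_reward(self):
        pass

    def get_reward(self, net_worths: List[float]) -> float:
        # Assumes the environment passes the running list of net worths.
        return net_worths[-1] - net_worths[-2] if len(net_worths) > 1 else 0.0

# It would then be registered alongside the built-in strategies, e.g.:
# rewards["simple-profit"] = SimpleProfit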
# Parallel optimization with an explicit reward strategy, followed by a
# train/test/report run in the parent process.
import multiprocessing
from multiprocessing import Pool

from lib.env.reward import WeightedUnrealizedProfit  # assumed import path


def optimize_code(params):
    # Import inside the worker so each process constructs RLTrader after the fork.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.optimize()

    # Placeholder result so the parent collects one value per worker.
    return ""


if __name__ == '__main__':
    n_processes = multiprocessing.cpu_count()
    params = {
        'n_envs': n_processes,
        'reward_strategy': WeightedUnrealizedProfit
    }

    opt_pool = Pool(processes=n_processes)
    # imap is lazy; materialize the iterator so training only starts once
    # every optimization worker has finished.
    results = list(opt_pool.imap(optimize_code, [params for _ in range(n_processes)]))
    # print(results)

    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.train(test_trained_model=True,
                 render_test_env=True,
                 render_report=True,
                 save_report=True)
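# Why the list() around imap matters: a self-contained demonstration that
# Pool.imap returns its results lazily, so iterating (or list()-ing) is what
# synchronizes the parent with the workers. square() is illustrative only.
from multiprocessing import Pool


def square(x):
    return x * x


if __name__ == '__main__':
    with Pool(processes=2) as pool:
        lazy = pool.imap(square, range(4))  # returns an iterator immediately
        print(list(lazy))                   # blocks until all results arrive -> [0, 1, 4, 9]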
# CLI entry point that fans 'optimize' out across processes with
# multiprocessing.Process, then handles the remaining commands. The def line
# for run_optimize was reconstructed from the Process(target=run_optimize, ...)
# call; the lib.cli import paths are assumptions.
from multiprocessing import Process

from lib.cli.RLTraderCLI import RLTraderCLI
from lib.util.logger import init_logger
from lib.cli.functions import download_data_async

trader_cli = RLTraderCLI()
args = trader_cli.get_args()


def run_optimize(args, logger):
    # Import inside the worker so each process constructs RLTrader after the fork.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**vars(args), logger=logger)
    trader.optimize(args.trials)


if __name__ == '__main__':
    logger = init_logger(__name__, show_debug=args.debug)

    if args.command == 'optimize':
        n_processes = args.parallel_jobs
        processes = []

        for _ in range(n_processes):
            processes.append(Process(target=run_optimize, args=(args, logger)))

        for proc in processes:
            proc.start()

        # Wait for every optimization worker before moving on.
        for proc in processes:
            proc.join()

    from lib.RLTrader import RLTrader

    trader = RLTrader(**vars(args), logger=logger)

    if args.command == 'train':
        trader.train(n_epochs=args.epochs)
    elif args.command == 'test':
        trader.test(model_epoch=args.model_epoch, should_render=args.no_render)
    elif args.command == 'update-static-data':
        download_data_async()
# Minimal entry point: train with defaults, then test and render the result.
import warnings

from lib.RLTrader import RLTrader

warnings.filterwarnings('ignore')

if __name__ == '__main__':
    trader = RLTrader()

    # trader.optimize(n_trials=1)
    trader.train(n_epochs=10, test_trained_model=True, render_trained_model=True)
    # trader.test()