Ejemplo n.º 1
0
def optimize_code(params):
    """Worker entry point: build an RLTrader from *params* and run optimization.

    Returns an empty string as a picklable placeholder result.
    """
    # Imported inside the function so worker processes resolve it at call time.
    from lib.RLTrader import RLTrader

    RLTrader(**params).optimize()

    return ""
Ejemplo n.º 2
0
def run_optimize(args, logger):
    """Build an RLTrader from parsed CLI args and run hyperparameter optimization.

    NOTE(review): ``reward_strategy`` is a free variable here — it must exist
    at module scope of the file this snippet came from, otherwise this raises
    NameError. TODO confirm where it is defined.
    """
    # Lazy import — presumably so spawned worker processes import it themselves.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**vars(args),
                      logger=logger,
                      reward_strategy=reward_strategy)
    trader.optimize(n_trials=args.trials)
Ejemplo n.º 3
0
 def test_that_args_get_injected_correctly(self, data_mock, opt_mock,
                                           init_mock):
     """Arguments parsed for the 'optimize' command must be injected verbatim
     into the RLTrader instance's attributes.

     Fix: the ``model_verbose`` assertion was duplicated (it appeared both
     third and last); the redundant copy is removed.
     """
     args = self.parser.parse_args(['optimize'])
     sut = RLTrader(**vars(args), logger=MagicMock())
     sut.study_name = 'test'
     with mock.patch('lib.util.logger.init_logger'):
         assert (sut.tensorboard_path == args.tensorboard_path)
         assert (sut.params_db_path == args.params_db_path)
         assert (sut.model_verbose == args.model_verbose)
         assert (sut.nminibatches == args.nminibatches)
         assert (sut.train_split_percentage == args.train_split_percentage)
         assert (sut.input_data_path == args.input_data_path)
Ejemplo n.º 4
0
import numpy as np

from lib.RLTrader import RLTrader
from lib.TraderArgs import TraderArgs
from lib.util.logger import init_logger

# Silence numpy warnings for the whole run.
# NOTE(review): ``np.warnings`` is a deprecated alias removed in newer numpy
# releases — confirm the pinned numpy version still exposes it (the stdlib
# ``warnings`` module is the durable spelling).
np.warnings.filterwarnings('ignore')
option_parser = TraderArgs()
args = option_parser.get_args()  # parsed once at import time

if __name__ == '__main__':
    logger = init_logger(__name__, show_debug=args.debug)
    trader = RLTrader(**vars(args), logger=logger)

    # Dispatch on the CLI sub-command.
    if args.command == 'optimize':
        trader.optimize(n_trials=args.trials,
                        n_parallel_jobs=args.parallel_jobs)
    elif args.command == 'train':
        trader.train(n_epochs=args.epochs)
    elif args.command == 'test':
        # NOTE(review): ``args.no_render`` feeds ``should_render`` — presumably
        # an argparse store_false flag; verify the polarity against TraderArgs.
        trader.test(model_epoch=args.model_epoch, should_render=args.no_render)
    elif args.command == 'opt-train-test':
        trader.optimize(args.trials, args.parallel_jobs)
        trader.train(n_epochs=args.train_epochs,
                     test_trained_model=args.no_test,
                     render_trained_model=args.no_render)
Ejemplo n.º 5
0
import os
import numpy as np

from multiprocessing import Pool

# Silence numpy warnings globally (np.warnings is a deprecated alias in
# newer numpy — TODO confirm the pinned version).
np.warnings.filterwarnings('ignore')


def optimize_code(params):
    """Worker entry point: construct an RLTrader and run its optimizer."""
    # Deferred import so each worker process resolves the module itself.
    from lib.RLTrader import RLTrader

    RLTrader(**params).optimize()


if __name__ == '__main__':
    # One optimization worker per CPU core.
    n_processes = os.cpu_count()
    params = {'n_envs': n_processes}

    # Fix: the Pool was never closed/joined, leaking worker processes.
    # ``map`` blocks until every task completes, so the context manager
    # can safely terminate the pool afterwards.
    with Pool(processes=n_processes) as opt_pool:
        opt_pool.map(optimize_code, [params for _ in range(n_processes)])

    # Imported only after the pool work — presumably so fork-based workers
    # don't inherit the heavy import; TODO confirm.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.train(test_trained_model=True, render_trained_model=True)
Ejemplo n.º 6
0
# NOTE(review): this snippet references ``np``, ``RLTraderCLI``,
# ``IncrementalProfit`` and ``WeightedUnrealizedProfit`` without importing
# them — the import block was presumably lost during extraction; restore it
# before running.
np.warnings.filterwarnings('ignore')

trader_cli = RLTraderCLI()
args = trader_cli.get_args()

# Map each CLI reward-strategy name to its implementing class.
rewards = {
    "incremental-profit": IncrementalProfit,
    "weighted-unrealized-profit": WeightedUnrealizedProfit
}
# Resolved once at import time from the parsed CLI choice.
reward_strategy = rewards[args.reward_strat]

if __name__ == '__main__':
    logger = init_logger(__name__, show_debug=args.debug)
    from lib.RLTrader import RLTrader
    trader = RLTrader(**vars(args),
                      logger=logger,
                      reward_strategy=reward_strategy)

    if args.command == 'train':
        trader.train(n_epochs=args.epochs,
                     save_every=args.save_every,
                     test_trained_model=args.test_trained,
                     render_test_env=args.render_test,
                     render_report=args.render_report,
                     save_report=args.save_report)
    elif args.command == 'test':
        trader.test(model_epoch=args.model_epoch,
                    render_env=args.render_env,
                    render_report=args.render_report,
                    save_report=args.save_report)
    elif args.command == 'update-static-data':
Ejemplo n.º 7
0
import numpy as np

from lib.RLTrader import RLTrader

# Silence numpy warnings (np.warnings is a deprecated alias in newer numpy
# — TODO confirm the pinned version).
np.warnings.filterwarnings('ignore')

if __name__ == '__main__':
    # All-defaults run: optimize hyperparameters, then train and evaluate.
    trader = RLTrader()

    trader.optimize()
    trader.train(test_trained_model=True, render_trained_model=True)
Ejemplo n.º 8
0
def optimize_code(params):
    """Instantiate an RLTrader from *params* and run its optimizer.

    NOTE: ``RLTrader`` must already be in scope at module level.
    """
    RLTrader(**params).optimize()
Ejemplo n.º 9
0
import multiprocessing
from lib.RLTrader import RLTrader

# NOTE(review): ``np`` is never imported in this snippet, so this line
# raises NameError as written — presumably ``import numpy as np`` was
# dropped during extraction.
np.warnings.filterwarnings('ignore')


def optimize_code(params):
    """Worker target: build an RLTrader from *params* and optimize.

    Relies on ``RLTrader`` being available at module scope.
    """
    RLTrader(**params).optimize()


if __name__ == '__main__':
    # One environment per CPU core.
    n_process = multiprocessing.cpu_count()
    params = {'n_cpu': n_process}

    # Cleanup: removed the large commented-out multiprocessing scaffolding
    # and the disabled train() call — dead code that obscured the live path.
    trader = RLTrader(**params)

    # Evaluate the model checkpoint saved at epoch 10.
    trader.test(model_epoch=10)
Ejemplo n.º 10
0
def optimize_code(params):
    """Worker entry point for parallel optimization.

    Returns an empty string as a picklable placeholder result.
    """
    # Deferred import so each worker resolves the module at call time.
    from lib.RLTrader import RLTrader

    RLTrader(**params).optimize()

    return ""


if __name__ == '__main__':
    # NOTE(review): ``multiprocessing``, ``Pool`` and
    # ``WeightedUnrealizedProfit`` are not imported in this snippet —
    # presumably trimmed during extraction.
    n_processes = multiprocessing.cpu_count()
    params = {
        'n_envs': n_processes,
        'reward_strategy': WeightedUnrealizedProfit
    }

    opt_pool = Pool(processes=n_processes)
    # NOTE(review): ``imap`` is lazy and ``results`` is never consumed, so
    # the workers run concurrently with the training below, and the pool is
    # never closed/joined — confirm this overlap is intentional before
    # "fixing" it (a ``with Pool`` here would terminate the workers early).
    results = opt_pool.imap(optimize_code,
                            [params for _ in range(n_processes)])

    # print([result.get() for result in results])

    from lib.RLTrader import RLTrader

    trader = RLTrader(**params)
    trader.train(test_trained_model=True,
                 render_test_env=True,
                 render_report=True,
                 save_report=True)
Ejemplo n.º 11
0
def run_optimize(args, logger):
    """Create an RLTrader from parsed CLI args and run ``args.trials`` optimization trials."""
    # Deferred import — resolved when the function is actually invoked.
    from lib.RLTrader import RLTrader

    RLTrader(**vars(args), logger=logger).optimize(args.trials)
Ejemplo n.º 12
0
    trader = RLTrader(**vars(args), logger=logger)
    trader.optimize(args.trials)


if __name__ == '__main__':
    # NOTE(review): ``init_logger``, ``args``, ``Process``, ``run_optimize``
    # and ``download_data_async`` are not defined in this snippet — presumably
    # trimmed during extraction.
    logger = init_logger(__name__, show_debug=args.debug)

    if args.command == 'optimize':
        # Fan out one optimization run per requested parallel job.
        n_processes = args.parallel_jobs

        processes = []
        for _ in range(n_processes):
            processes.append(Process(target=run_optimize, args=(args, logger)))

        for proc in processes:
            proc.start()

        # Wait for every optimization process before continuing.
        for proc in processes:
            proc.join()

    # NOTE(review): not guarded by elif — an RLTrader is constructed even for
    # the 'optimize' command after its workers finish; confirm intentional.
    from lib.RLTrader import RLTrader

    trader = RLTrader(**vars(args), logger=logger)

    if args.command == 'train':
        trader.train(n_epochs=args.epochs)
    elif args.command == 'test':
        # NOTE(review): ``args.no_render`` feeds ``should_render`` — verify
        # the flag's polarity against the argument parser.
        trader.test(model_epoch=args.model_epoch, should_render=args.no_render)
    elif args.command == 'update-static-data':
        download_data_async()
Ejemplo n.º 13
0
import numpy as np

from lib.RLTrader import RLTrader

# Silence numpy warnings (np.warnings is a deprecated alias in newer numpy
# — TODO confirm the pinned version).
np.warnings.filterwarnings('ignore')

if __name__ == '__main__':
    # Train for 10 epochs with defaults; the optimize and standalone test
    # steps are deliberately left disabled below.
    trader = RLTrader()

    #trader.optimize(n_trials=1)
    trader.train(n_epochs=10,
                 test_trained_model=True,
                 render_trained_model=True)
    # trader.test()
Ejemplo n.º 14
0
def run_concurrent_optimize(trader: RLTrader, args):
    """Run hyperparameter optimization with trial count and parallelism from CLI args.

    Fix: the original passed ``args.trials`` twice
    (``optimize(args.trials, args.trials, args.parallel_jobs)``); sibling
    call sites use ``optimize(n_trials=..., n_parallel_jobs=...)``, so the
    duplicated positional argument is dropped.
    """
    trader.optimize(args.trials, args.parallel_jobs)
Ejemplo n.º 15
0
def run_concurrent_optimize():
    """Build an RLTrader from module-level ``args`` and run ``args.trials`` trials.

    NOTE(review): ``RLTrader`` and ``args`` are free variables — both must
    exist at module scope (the snippet's imports/setup were presumably
    trimmed during extraction).
    """
    trader = RLTrader(**vars(args))
    trader.optimize(args.trials)