Example #1
def demo_discrete_action_off_policy():
    args = Arguments()
    from elegantrl2.tutorial.agent import AgentDoubleDQN  # AgentDQN
    args.agent = AgentDoubleDQN()
    '''choose environment'''
    if_train_cart_pole = 0
    if if_train_cart_pole:
        "TotalStep: 5e4, TargetReward: 200, UsedTime: 60s"
        args.env = PreprocessEnv(env=gym.make('CartPole-v0'))
        args.net_dim = 2**7
        args.target_step = args.env.max_step * 2

    if_train_lunar_lander = 1
    if if_train_lunar_lander:
        "TotalStep: 2e5, TargetReturn: 200, UsedTime: 400s, LunarLander-v2, PPO"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.net_dim = 2**8
        args.batch_size = args.net_dim
    '''train and evaluate'''
    train_and_evaluate(args)
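Note: these snippets are excerpts from a single demo.py, so the file-level imports are not repeated in each one. A plausible preamble for the off-policy demos, assuming the usual ElegantRL-style layout (Example #4 confirms that Arguments lives in elegantrl2.run; the module path for PreprocessEnv is an assumption):

import sys
import gym

from elegantrl2.run import Arguments, train_and_evaluate  # Arguments path as in Example #4
from elegantrl2.env import PreprocessEnv  # module path is an assumption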
Example #2
def demo_continuous_action_off_policy():
    args = Arguments()
    args.gpu_id = int(sys.argv[-1][-4])  # digit before '.py' in the script name, e.g. 'demo0.py' -> 0

    from elegantrl2.tutorial.agent import AgentSAC  # AgentDDPG AgentTD3
    args.agent = AgentSAC()
    '''choose environment'''
    if_train_pendulum = 1
    if if_train_pendulum:
        "TotalStep: 4e5, TargetReward: -200, UsedTime: 400s"
        env = gym.make('Pendulum-v0')
        env.target_return = -200  # set target_return manually for env 'Pendulum-v0'
        args.env = PreprocessEnv(env=env)
        args.reward_scale = 2**-3  # RewardRange: -1800 < -200 < -50 < 0
        args.net_dim = 2**7
        args.batch_size = args.net_dim
        args.target_step = args.env.max_step * 4

    if_train_lunar_lander = 0
    if if_train_lunar_lander:
        "TotalStep: 4e5, TargetReward: 200, UsedTime: 900s"
        args.env = PreprocessEnv(env=gym.make('LunarLanderContinuous-v2'))
        args.reward_scale = 2**0  # RewardRange: -800 < -200 < 200 < 302

    if_train_bipedal_walker = 0
    if if_train_bipedal_walker:
        "TotalStep: 8e5, TargetReward: 300, UsedTime: 1800s"
        args.env = PreprocessEnv(env=gym.make('BipedalWalker-v3'))
        args.reward_scale = 2**0  # RewardRange: -200 < -150 < 300 < 334
        args.gamma = 0.97
        args.if_per_or_gae = True
    '''train and evaluate'''
    train_and_evaluate(args)
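Example #2 sets args.reward_scale = 2**-3 for Pendulum, whose raw episode returns run from about -1800 to 0; scaling keeps the critic's Q-targets in a numerically comfortable range. A minimal sketch of what the factor means in effect (the wrapper name is hypothetical and only illustrates the idea; ElegantRL applies the factor internally rather than through a wrapper):

import gym

class RewardScaleWrapper(gym.Wrapper):
    """Hypothetical wrapper: multiply rewards by a constant before training sees them."""

    def __init__(self, env, reward_scale=2 ** -3):
        super().__init__(env)
        self.reward_scale = reward_scale

    def step(self, action):
        state, reward, done, info = self.env.step(action)  # old 4-tuple gym API, as in these demos
        return state, reward * self.reward_scale, done, info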
Example #3
def demo_discrete_action_on_policy():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    from elegantrl2.tutorial.agent import AgentDiscretePPO
    args.agent = AgentDiscretePPO()
    '''choose environment'''
    if_train_cart_pole = 0
    if if_train_cart_pole:
        "TotalStep: 5e4, TargetReward: 200, UsedTime: 60s"
        args.env = PreprocessEnv(env=gym.make('CartPole-v0'))
        args.net_dim = 2**7
        args.batch_size = args.net_dim * 2
        args.repeat_times = 2**4
        args.target_step = args.env.max_step * 8
        args.if_per_or_gae = True

    if_train_lunar_lander = 1
    if if_train_lunar_lander:
        "TotalStep: 2e5, TargetReturn: 200, UsedTime: 400s, LunarLander-v2, PPO"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.agent.cri_target = False
        args.reward_scale = 2**-1
        args.net_dim = 2**8
        args.batch_size = args.net_dim * 4
        args.target_step = args.env.max_step * 4
        args.repeat_times = 2**5
        args.if_per_or_gae = True
    '''train and evaluate'''
    train_and_evaluate(args)
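In ElegantRL's naming, if_per_or_gae toggles prioritized experience replay for off-policy agents and generalized advantage estimation (GAE) for on-policy agents such as the PPO variants here. A minimal standalone sketch of GAE(lambda), for reference only (not ElegantRL's internal code):

import numpy as np

def compute_gae(rewards, values, dones, gamma=0.99, lam=0.95):
    """GAE(lambda); `values` carries one extra bootstrap entry at the end."""
    advantages = np.zeros(len(rewards), dtype=np.float32)
    gae = 0.0
    for t in reversed(range(len(rewards))):
        not_done = 1.0 - dones[t]
        delta = rewards[t] + gamma * values[t + 1] * not_done - values[t]
        gae = delta + gamma * lam * not_done * gae
        advantages[t] = gae
    return advantages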
Example #4
def check_stock_trading_env():
    if_eval = True  # False

    env = StockTradingEnv(if_eval=if_eval)
    action_dim = env.action_dim

    state = env.reset()
    print('| check_stock_trading_env, state_dim', len(state))

    from time import time
    timer = time()

    # ============================================================
    policy_name = 'Random Action 1e-2'
    step = 1
    done = False
    episode_return = 0

    # state = env.reset()
    while not done:
        action = rd.uniform(-1, 1, size=action_dim) * 1e-2
        next_state, reward, done, _ = env.step(action)
        # print(';', len(next_state), env.day, reward)
        episode_return += reward
        step += 1

    print()
    print(f"| {policy_name}:")
    print(f"| step {step}, UsedTime {time() - timer:.3e}")
    print(f"| gamma_reward \t\t\t{env.gamma_reward:.3e}")
    print(f"| episode return \t\t{episode_return:.3e}")
    # rough discounted-return estimate: average reward per step, summed as a geometric series
    print(
        f"| discount return \t\t{episode_return / step / (1 - env.gamma):.3e}")
    print(f"| env episode return \t{env.episode_return:.3e}")

    # ============================================================
    policy_name = 'Buy 3 Action'  # label matches action[:3] below
    step = 1
    done = False
    episode_return = 0
    timer = time()  # restart the timer so UsedTime covers only this rollout

    state = env.reset()  # the env finished the previous rollout, so reset it
    while not done:
        action = np.zeros(action_dim)
        action[:3] = 1  # buy a fixed amount of the first 3 stocks every step

        next_state, reward, done, _ = env.step(action)
        # print(';', len(next_state), env.day, reward)
        episode_return += reward
        step += 1

    print()
    print(f"| {policy_name}:")
    print(f"| step {step}, UsedTime {time() - timer:.3e}")
    print(f"| gamma_reward \t\t\t{env.gamma_reward:.3e}")
    print(f"| episode return \t\t{episode_return:.3e}")
    print(
        f"| discount return \t\t{episode_return / step / (1 - env.gamma):.3e}")
    print(f"| env episode return \t{env.episode_return:.3e}")

    # ============================================================
    '''draw_cumulative_return'''
    from elegantrl2.agent import AgentPPO
    from elegantrl2.run import Arguments
    args = Arguments(if_on_policy=True)
    args.agent = AgentPPO()
    args.env = StockTradingEnv(if_eval=True)
    args.if_remove = False
    args.cwd = './StockTradingEnv-v1_AgentPPO'
    args.init_before_training()

    env.draw_cumulative_return(args, torch)
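To run this check on its own, roughly the following preamble is needed (the StockTradingEnv import path is an assumption, inferred from the stock envs the other demos import):

import numpy as np
import numpy.random as rd
import torch

from envs.FinRL.StockTrading import StockTradingEnv  # import path is an assumption

if __name__ == '__main__':
    check_stock_trading_env()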
Example #5
def demo_continuous_action_on_policy():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    from elegantrl2.tutorial.agent import AgentPPO
    args.agent = AgentPPO()
    '''choose environment'''
    if_train_pendulum = 0
    if if_train_pendulum:
        "TotalStep: 4e5, TargetReward: -200, UsedTime: 400s"
        env = gym.make('Pendulum-v0')
        env.target_return = -200  # set target_return manually for env 'Pendulum-v0'
        args.env = PreprocessEnv(env=env)
        args.reward_scale = 2**-3  # RewardRange: -1800 < -200 < -50 < 0
        args.net_dim = 2**7
        args.batch_size = args.net_dim * 2
        args.target_step = args.env.max_step * 16

    if_train_lunar_lander = 0
    if if_train_lunar_lander:
        "TotalStep: 4e5, TargetReward: 200, UsedTime: 900s"
        args.env = PreprocessEnv(env=gym.make('LunarLanderContinuous-v2'))
        args.reward_scale = 2**0  # RewardRange: -800 < -200 < 200 < 302

    if_train_bipedal_walker = 1
    if if_train_bipedal_walker:
        "TotalStep: 8e5, TargetReward: 300, UsedTime: 1800s"
        args.env = PreprocessEnv(env=gym.make('BipedalWalker-v3'))
        args.reward_scale = 2**0  # RewardRange: -200 < -150 < 300 < 334
        args.gamma = 0.97
        args.if_per_or_gae = True
        # args.agent.lambda_entropy = 0.05
    '''train and evaluate'''
    train_and_evaluate(args)
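The commented-out args.agent.lambda_entropy = 0.05 weights an entropy bonus that keeps the PPO policy from collapsing to a near-deterministic one too early. A minimal sketch of how such a bonus typically enters the clipped surrogate loss (illustrative, not ElegantRL's exact code):

import torch

def ppo_actor_loss(ratio, advantage, entropy, clip=0.2, lambda_entropy=0.05):
    # clipped surrogate objective; the entropy bonus is maximized, hence negated in the loss
    surrogate = torch.min(ratio * advantage,
                          ratio.clamp(1.0 - clip, 1.0 + clip) * advantage)
    return -(surrogate.mean() + lambda_entropy * entropy.mean())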
Example #6
File: demo.py  Project: greedforgood/Python
def demo_custom_env_finance_rl_dow30():  # 1.7+ 2.0+
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    args.random_seed = 19430

    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.agent.cri_target = True
    args.agent.lambda_entropy = 0.02

    args.gamma = 0.995

    from envs.FinRL.StockTrading import StockEnvDOW30, StockVecEnvDOW30
    args.env = StockEnvDOW30(if_eval=False, gamma=args.gamma)
    args.env_eval = StockEnvDOW30(if_eval=True, gamma=args.gamma)

    args.repeat_times = 2**4
    args.learning_rate = 2**-14
    args.net_dim = 2**8
    args.batch_size = args.net_dim

    args.eval_gap = 2**7
    args.eval_times1 = 2**0
    args.eval_times2 = 2**1
    args.break_step = int(10e6)
    args.if_allow_break = False

    if_single_env = 0
    if if_single_env:
        args.gpu_id = int(sys.argv[-1][-4])
        args.random_seed += int(args.gpu_id)
        args.target_step = args.env.max_step * 4
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_batch_env = 1
    if if_batch_env:
        args.env = StockVecEnvDOW30(if_eval=False, gamma=args.gamma, env_num=4)
        args.gpu_id = int(sys.argv[-1][-4])
        args.random_seed += args.gpu_id
        args.target_step = args.env.max_step
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_multi_learner = 0
    if if_multi_learner:
        args.env = StockVecEnvDOW30(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = (0, 1)
        args.worker_num = 2
        train_and_evaluate_mg(args)
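StockVecEnvDOW30(env_num=4) batches four copies of the environment, which is why the batched branch can cut target_step to max_step * 1 while the single-env branch uses max_step * 4: each rollout step now yields four transitions. A hypothetical sketch of the convention a vectorized env like this usually follows (the real class's interface may differ):

import numpy as np

class TinyVecEnv:
    """Hypothetical batched env: step() takes one action per copy and returns stacked results."""

    def __init__(self, make_env, env_num):
        self.envs = [make_env() for _ in range(env_num)]

    def reset(self):
        return np.stack([env.reset() for env in self.envs])

    def step(self, actions):  # actions has shape (env_num, action_dim)
        states, rewards, dones, infos = zip(*(env.step(a) for env, a in zip(self.envs, actions)))
        return np.stack(states), np.array(rewards), np.array(dones), list(infos)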
Example #7
File: demo.py  Project: greedforgood/Python
def demo_custom_env_finance_rl_nas89():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    args.random_seed = 1943

    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.agent.cri_target = True
    args.agent.lambda_entropy = 0.04

    from envs.FinRL.StockTrading import StockEnvDOW30, StockEnvNAS89, StockVecEnvNAS89
    args.gamma = 0.999

    if_dow30_daily = 1
    if if_dow30_daily:
        args.env = StockEnvDOW30(if_eval=False, gamma=args.gamma)
        args.env_eval = StockEnvDOW30(if_eval=True, gamma=args.gamma)
    else:  # elif if_nas89_minute:
        args.env = StockEnvNAS89(if_eval=False, gamma=args.gamma)
        args.env_eval = StockEnvNAS89(if_eval=True, gamma=args.gamma)

    args.repeat_times = 2**4
    args.learning_rate = 2**-14
    args.net_dim = int(2**8 * 1.5)
    args.batch_size = args.net_dim * 4
    args.target_step = args.env.max_step

    args.eval_gap = 2**8
    args.eval_times1 = 2**0
    args.eval_times2 = 2**1
    args.break_step = int(16e6)
    args.if_allow_break = False

    if_single_env = 1
    if if_single_env:
        args.gpu_id = 0
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_batch_env = 0
    if if_batch_env:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = 3
        args.random_seed += args.gpu_id
        args.worker_num = 2
        train_and_evaluate_mp(args)

    if_multi_learner = 0
    if if_multi_learner:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = (0, 1)
        args.worker_num = 2
        train_and_evaluate_mg(args)
Example #8
File: demo.py  Project: greedforgood/Python
def demo_custom_env_finance_rl():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    args.random_seed = 0

    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.agent.cri_target = True
    args.agent.lambda_entropy = 0.04

    from envs.FinRL.StockTrading import StockEnvNAS89, StockVecEnvNAS89
    args.gamma = 0.999
    args.env = StockEnvNAS89(if_eval=False, gamma=args.gamma)
    args.env_eval = StockEnvNAS89(if_eval=True, gamma=args.gamma)

    args.repeat_times = 2**4
    args.learning_rate = 2**-14
    args.net_dim = int(2**8 * 1.5)
    args.batch_size = args.net_dim * 4

    if_single_env = 0
    if if_single_env:
        args.gpu_id = 0
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_batch_env = 1
    if if_batch_env:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = 0
        args.worker_num = 2
        train_and_evaluate_mp(args)

    if_multi_learner = 0
    if if_multi_learner:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = (0, 1)
        args.worker_num = 2
        train_and_evaluate_mg(args)

    "TotalStep: 52e5, TargetReturn: 2.35, UsedTime:  3934s, FinanceStock-v2"
    "TotalStep: 81e5, TargetReturn: 2.47, UsedTime:  6129s, FinanceStock-v2"
    "TotalStep: 19e5, TargetReturn: 2.50, UsedTime:  1654s, FinanceStock-v2 GPU 2, 3"
    "TotalStep: 65e5, TargetReturn: 4.61, UsedTime:  5659s, FinanceStock-v2 GPU 2, 3"
    "TotalStep: 18e5, TargetReturn: 2.50, UsedTime:  1452s, FinanceStock-v2 GPU 0, 1"
    "TotalStep: 61e5, TargetReturn: 3.92, UsedTime:  4921s, FinanceStock-v2 GPU 0, 1"
    "TotalStep:  4e5, TargetReturn: 2.20, UsedTime:   583s, FinanceStock-v2 GPU 0, 1, 2, 3"
    "TotalStep: 11e6, TargetReturn: 4.39, UsedTime:  9648s, FinanceStock-v2 GPU 0, 1, 2, 3"
Example #9
File: demo.py  Project: greedforgood/Python
def demo_continuous_action_on_policy():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.gpu_id = int(sys.argv[-1][-4])  # digit before '.py' in the script name
    args.agent.cri_target = True
    args.learning_rate = 2**-14
    args.random_seed = 1943
    '''choose environment'''
    if_train_pendulum = 0
    if if_train_pendulum:
        "TotalStep: 4e5, TargetReward: -200, UsedTime: 400s"
        env = gym.make('Pendulum-v0')
        env.target_return = -200  # set target_return manually for env 'Pendulum-v0'
        args.env = PreprocessEnv(env=env)
        args.reward_scale = 2**-3  # RewardRange: -1800 < -200 < -50 < 0
        args.net_dim = 2**7
        args.batch_size = args.net_dim * 2
        args.target_step = args.env.max_step * 16

    if_train_lunar_lander = 0
    if if_train_lunar_lander:
        "TotalStep: 6e5, TargetReward: 200, UsedTime: 800s"
        env_name = 'LunarLanderContinuous-v2'
        # args.env = PreprocessEnv(env=env_name)
        args.env = PreprocessVecEnv(env=env_name, env_num=2)
        args.env_eval = PreprocessEnv(env=env_name)
        args.reward_scale = 2**0  # RewardRange: -800 < -200 < 200 < 302
        args.break_step = int(8e6)
        args.if_per_or_gae = True
        args.target_step = args.env.max_step * 8
        args.repeat_times = 2**4

    if_train_bipedal_walker = 1
    if if_train_bipedal_walker:
        "TotalStep: 8e5, TargetReward: 300, UsedTime: 1800s"
        env_name = 'BipedalWalker-v3'
        # args.env = PreprocessEnv(env=env_name)
        args.env = PreprocessVecEnv(env=env_name, env_num=2)
        args.env_eval = PreprocessEnv(env=env_name)
        args.reward_scale = 2**0  # RewardRange: -200 < -150 < 300 < 334
        args.gamma = 0.97
        args.target_step = args.env.max_step * 8
        args.repeat_times = 2**4
        args.if_per_or_gae = True
        args.agent.lambda_entropy = 0.05
        args.break_step = int(8e6)
    '''train and evaluate'''
    # train_and_evaluate(args)
    args.worker_num = 2
    train_and_evaluate_mp(args)
Example #10
def demo_custom_env_finance_rl_nas89():  # 1.7+ 2.0+
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    args.random_seed = 19430

    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.agent.lambda_entropy = 0.02

    from envs.FinRL.StockTrading import StockEnvNAS89
    args.gamma = 0.999
    args.env = StockEnvNAS89(if_eval=False,
                             gamma=args.gamma,
                             turbulence_thresh=30)
    args.eval_env = StockEnvNAS89(if_eval=True,
                                  gamma=args.gamma,
                                  turbulence_thresh=15)

    args.net_dim = 2**9
    args.repeat_times = 2**4
    args.learning_rate = 2**-14
    args.batch_size = args.net_dim * 4

    args.eval_gap = 2**8
    args.eval_times1 = 2**0
    args.eval_times2 = 2**1
    args.break_step = int(8e6)
    args.if_allow_break = False

    if_single_proc = 0
    if if_single_proc:
        args.gpu_id = int(sys.argv[-1][-4])
        args.random_seed += int(args.gpu_id)
        args.target_step = args.env.max_step * 4
        train_and_evaluate(args)

    if_single_env = 1
    if if_single_env:
        args.gpu_id = int(sys.argv[-1][-4])
        args.random_seed += int(args.gpu_id)
        args.target_step = args.env.max_step * 1
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_multi_learner = 0
    if if_multi_learner:
        args.gpu_id = (2, 3) if len(sys.argv) == 1 else eval(
            sys.argv[-1])  # python main.py -GPU 0,1
        args.repeat_times = 2**4
        args.target_step = args.env.max_step
        args.worker_num = 4
        train_and_evaluate_mg(args)

    if_batch_env = 0
    if if_batch_env:
        from envs.FinRL.StockTrading import StockVecEnvNAS89
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = int(sys.argv[-1][-4])
        args.random_seed += args.gpu_id
        args.target_step = args.env.max_step
        args.worker_num = 4
        train_and_evaluate_mp(args)
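Several demos pick the GPU with int(sys.argv[-1][-4]): when the script is launched as python demo0.py with no extra arguments, sys.argv[-1] is the script name itself, and index -4 is the digit just before the '.py' suffix:

# 'demo0.py'[-1] == 'y', [-2] == 'p', [-3] == '.', [-4] == '0'
filename = 'demo0.py'       # hypothetical script name
gpu_id = int(filename[-4])  # -> 0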
Example #11
def demo_custom_env_finance_rl():
    args = Arguments(
        if_on_policy=True
    )  # on-policy hyper-parameters differ from off-policy ones
    args.random_seed = 0

    from elegantrl2.agent import AgentPPO
    args.agent = AgentPPO()
    args.agent.cri_target = True
    args.agent.lambda_entropy = 0.04

    from envs.FinRL.StockTrading import StockEnvNAS89, StockVecEnvNAS89
    args.gamma = 0.999
    args.env = StockEnvNAS89(if_eval=False, gamma=args.gamma)
    args.eval_env = StockEnvNAS89(if_eval=True, gamma=args.gamma)

    args.repeat_times = 2**4
    args.learning_rate = 2**-14
    args.net_dim = int(2**8 * 1.5)
    args.batch_size = args.net_dim * 4

    if_single_env = 0
    if if_single_env:
        args.gpu_id = 0
        args.worker_num = 4
        train_and_evaluate_mp(args)

    if_batch_env = 1
    if if_batch_env:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = 0
        args.worker_num = 2
        train_and_evaluate_mp(args)

    if_multi_learner = 0
    if if_multi_learner:
        args.env = StockVecEnvNAS89(if_eval=False, gamma=args.gamma, env_num=2)
        args.gpu_id = (0, 1)
        args.worker_num = 2
        train_and_evaluate_mg(args)
Example #12
def demo_discrete_action_off_policy():
    args = Arguments()
    from elegantrl2.agent import AgentD3QN as Agent  # AgentDQN
    args.agent = Agent()
    '''choose environment'''
    if_train_cart_pole = 0
    if if_train_cart_pole:
        "TotalStep: 5e4, TargetReward: 200, UsedTime: 60s"
        args.env = PreprocessEnv(env=gym.make('CartPole-v0'))
        args.net_dim = 2**7
        args.target_step = args.env.max_step * 2

        train_and_evaluate(args)

    if_train_lunar_lander = 1
    if if_train_lunar_lander:
        "TotalStep: 2e5, TargetReturn: 200, UsedTime: 400s, LunarLander-v2, PPO"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.net_dim = 2**8
        args.batch_size = args.net_dim

        train_and_evaluate(args)

    if_train_lunar_lander_mp = 0
    if if_train_lunar_lander_mp:
        "TotalStep: 36e4, EpisodeReturn: 200, UsedTime: 1181s, LunarLander-v2, D3QN"
        "TotalStep: 52e4, EpisodeReturn: 260, UsedTime: 2148s, LunarLander-v2, D3QN"
        args.env = PreprocessEnv(env=gym.make('LunarLander-v2'))
        args.net_dim = 2**8
        args.batch_size = args.net_dim
        args.target_step = args.env.max_step * 1
        args.worker_num = 4

        args.reward_scale = 2**-4
        args.max_memo = 2**19
        args.break_step = 2**19
        args.if_allow_break = False
        args.gpu_id = int(sys.argv[-1])  # last CLI argument selects the GPU

        train_and_evaluate_mp(args)
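Each demo is a plain function, so a typical entry point simply calls the one you want, for example:

if __name__ == '__main__':
    demo_discrete_action_off_policy()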