Example #1
File: beta0.py  Project: z4z5/ElegantRL
def demo42():
    args = Arguments(if_on_policy=True)
    args.agent_rl = agent.AgentGaePPO  # agent.AgentPPO

    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)  # reference the module so the import above is not removed as unused
    args.env = decorate_env(gym.make('AntBulletEnv-v0'))
    args.break_step = int(5e6 * 8)  # (1e6) 5e6 UsedTime: 25697s
    args.reward_scale = 2 ** -3  # scale factor applied to raw environment rewards
    args.repeat_times = 2 ** 4  # gradient-update passes over each batch of collected data
    args.net_dim = 2 ** 9  # width of the network's hidden layers
    args.batch_size = 2 ** 8  # transitions per gradient step
    args.max_memo = 2 ** 12  # capacity of the on-policy rollout buffer
    args.show_gap = 2 ** 6  # interval between evaluation printouts
    args.eval_times1 = 2 ** 2  # episodes averaged per evaluation

    args.rollout_num = 4  # number of parallel rollout workers
    train_and_evaluate__multiprocessing(args)
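
Both demos register the PyBullet environments simply by importing pybullet_envs before gym.make; the dir() call only references the module so the apparently unused import is not stripped by cleanup tools. A minimal standalone sketch of that pattern, assuming gym and pybullet are installed (only the environment id is taken from the excerpts):

import gym
import pybullet_envs  # importing this registers the *BulletEnv-v0 ids with gym
dir(pybullet_envs)  # reference the module so the import is not removed as unused

env = gym.make('AntBulletEnv-v0')
print(env.observation_space.shape, env.action_space.shape)  # state/action dimensions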
Example #2
File: beta0.py  Project: z4z5/ElegantRL
def demo5():
    args = Arguments(if_on_policy=False)
    # args.agent_rl = agent.AgentModSAC
    args.agent_rl = agent.AgentInterSAC

    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)  # as above: keep the pybullet_envs import from being stripped as unused
    args.env = decorate_env(gym.make('AntBulletEnv-v0'))
    # args.env = decorate_env(gym.make('ReacherBulletEnv-v0'))

    args.break_step = int(1e6 * 8)  # (5e5) 1e6, UsedTime: (15,000s) 30,000s
    args.reward_scale = 2 ** -2  # (-50) 0 ~ 2500 (3340)
    args.max_memo = 2 ** 19  # replay buffer capacity for the off-policy agent
    args.net_dim = 2 ** 7  # width of the hidden layers (todo)
    args.eva_size = 2 ** 5  # evaluation episodes for the Recorder
    args.show_gap = 2 ** 8  # interval between Recorder printouts

    train_and_evaluate(args)
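
The two demos differ mainly in how training data is handled: demo42 sets if_on_policy=True, keeps a small rollout buffer (max_memo = 2 ** 12), and trains through train_and_evaluate__multiprocessing with rollout_num = 4, while demo5 sets if_on_policy=False, allocates a much larger replay buffer (max_memo = 2 ** 19) for AgentInterSAC, and uses the plain train_and_evaluate loop. A hypothetical entry point for running either demo from inside beta0.py; the __main__ guard is an assumption, only the demo functions come from the excerpts above:

if __name__ == '__main__':
    demo42()  # on-policy AgentGaePPO on AntBulletEnv-v0, multiprocessing rollout
    # demo5()  # off-policy AgentInterSAC on the same environment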