def demo41():
    args = Arguments(if_on_policy=True)
    args.agent_rl = agent.AgentGaePPO  # agent.AgentPPO

    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)
    args.env = decorate_env(gym.make('ReacherBulletEnv-v0'))
    args.break_step = int(5e4 * 8)  # (5e4) 1e5, UsedTime: (400s) 800s
    args.repeat_times = 2 ** 3
    args.reward_scale = 2 ** 1  # (-15) 18 (30)
    args.eval_times1 = 2 ** 2
    args.eval_times2 = 2 ** 6
    args.rollout_num = 4
    train_and_evaluate__multiprocessing(args)
def demo42():
    args = Arguments(if_on_policy=True)
    args.agent_rl = agent.AgentGaePPO  # agent.AgentPPO

    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)
    args.env = decorate_env(gym.make('AntBulletEnv-v0'))
    args.break_step = int(5e6 * 8)  # (1e6) 5e6 UsedTime: 25697s
    args.reward_scale = 2 ** -3
    args.repeat_times = 2 ** 4
    args.net_dim = 2 ** 9
    args.batch_size = 2 ** 8
    args.max_memo = 2 ** 12
    args.show_gap = 2 ** 6
    args.eval_times1 = 2 ** 2
    args.rollout_num = 4
    train_and_evaluate__multiprocessing(args)
def demo5():
    args = Arguments(if_on_policy=False)
    # args.agent_rl = agent.AgentModSAC
    args.agent_rl = agent.AgentInterSAC

    import pybullet_envs  # for python-bullet-gym
    dir(pybullet_envs)
    args.env = decorate_env(gym.make('AntBulletEnv-v0'))
    # args.env = decorate_env(gym.make('ReacherBulletEnv-v0'))
    args.break_step = int(1e6 * 8)  # (5e5) 1e6, UsedTime: (15,000s) 30,000s
    args.reward_scale = 2 ** -2  # (-50) 0 ~ 2500 (3340)
    args.max_memo = 2 ** 19
    args.net_dim = 2 ** 7  # todo
    args.eva_size = 2 ** 5  # for Recorder
    args.show_gap = 2 ** 8  # for Recorder
    train_and_evaluate(args)
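

# Illustrative entry point (an assumption, not part of the demos above): when this file
# is run as a script, launch one of the demo configurations directly. The choice of
# demo41() is arbitrary; demo42() or demo5() can be called the same way.
if __name__ == '__main__':
    demo41()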