def test_learning(self):
    pol_net = PolNet(self.env.ob_space, self.env.ac_space, h1=32, h2=32)
    pol = GaussianPol(self.env.ob_space, self.env.ac_space, pol_net)
    targ_pol_net = PolNet(self.env.ob_space, self.env.ac_space, 32, 32)
    targ_pol_net.load_state_dict(pol_net.state_dict())
    targ_pol = GaussianPol(
        self.env.ob_space, self.env.ac_space, targ_pol_net)

    qf_net = QNet(self.env.ob_space, self.env.ac_space, h1=32, h2=32)
    qf = DeterministicSAVfunc(self.env.ob_space, self.env.ac_space, qf_net)
    targ_qf_net = QNet(self.env.ob_space, self.env.ac_space, 32, 32)
    # copy the online Q weights into the target Q network
    targ_qf_net.load_state_dict(qf_net.state_dict())
    targ_qf = DeterministicSAVfunc(
        self.env.ob_space, self.env.ac_space, targ_qf_net)

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
    optim_qf = torch.optim.Adam(qf_net.parameters(), 3e-4)

    epis = sampler.sample(pol, max_steps=32)

    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    traj.register_epis()

    result_dict = svg.train(
        traj, pol, targ_pol, qf, targ_qf,
        optim_pol, optim_qf, 1, 32, 0.01, 0.9, 1)

    del sampler
def test_learning(self):
    ob_space = self.env.real_observation_space
    skill_space = self.env.skill_space
    ob_skill_space = self.env.observation_space
    ac_space = self.env.action_space
    ob_dim = ob_skill_space.shape[0] - 4
    f_dim = ob_dim

    def discrim_f(x): return x

    pol_net = PolNet(ob_skill_space, ac_space)
    pol = GaussianPol(ob_skill_space, ac_space, pol_net)
    qf_net1 = QNet(ob_skill_space, ac_space)
    qf1 = DeterministicSAVfunc(ob_skill_space, ac_space, qf_net1)
    targ_qf_net1 = QNet(ob_skill_space, ac_space)
    targ_qf_net1.load_state_dict(qf_net1.state_dict())
    targ_qf1 = DeterministicSAVfunc(ob_skill_space, ac_space, targ_qf_net1)
    qf_net2 = QNet(ob_skill_space, ac_space)
    qf2 = DeterministicSAVfunc(ob_skill_space, ac_space, qf_net2)
    targ_qf_net2 = QNet(ob_skill_space, ac_space)
    targ_qf_net2.load_state_dict(qf_net2.state_dict())
    targ_qf2 = DeterministicSAVfunc(ob_skill_space, ac_space, targ_qf_net2)
    qfs = [qf1, qf2]
    targ_qfs = [targ_qf1, targ_qf2]
    log_alpha = nn.Parameter(torch.ones(()))

    high = np.array([np.finfo(np.float32).max] * f_dim)
    f_space = gym.spaces.Box(-high, high, dtype=np.float32)
    discrim_net = DiaynDiscrimNet(
        f_space, skill_space, h_size=100, discrim_f=discrim_f)
    discrim = DeterministicSVfunc(f_space, discrim_net)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 1e-4)
    optim_qf1 = torch.optim.Adam(qf_net1.parameters(), 3e-4)
    optim_qf2 = torch.optim.Adam(qf_net2.parameters(), 3e-4)
    optim_qfs = [optim_qf1, optim_qf2]
    optim_alpha = torch.optim.Adam([log_alpha], 1e-4)
    optim_discrim = torch.optim.SGD(
        discrim.parameters(), lr=0.001, momentum=0.9)

    off_traj = Traj()
    sampler = EpiSampler(self.env, pol, num_parallel=1)
    epis = sampler.sample(pol, max_steps=200)

    on_traj = Traj()
    on_traj.add_epis(epis)
    on_traj = ef.add_next_obs(on_traj)
    on_traj = ef.compute_diayn_rews(
        on_traj, lambda x: diayn_sac.calc_rewards(x, 4, discrim))
    on_traj.register_epis()
    off_traj.add_traj(on_traj)
    step = on_traj.num_step

    log_alpha = nn.Parameter(np.log(0.1) * torch.ones(()))  # fix alpha
    result_dict = diayn_sac.train(
        off_traj, pol, qfs, targ_qfs, log_alpha,
        optim_pol, optim_qfs, optim_alpha,
        step, 128, 5e-3, 0.99, 1, discrim, 4, True)
    discrim_losses = diayn.train(
        discrim, optim_discrim, on_traj, 32, 100, 4)

    del sampler
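# A minimal sketch of the DIAYN-style intrinsic reward that the call
# `diayn_sac.calc_rewards(x, 4, discrim)` above presumably computes,
# assuming 4 skills with a uniform prior: r = log q(z | s) - log p(z).
# `diayn_reward`, `discrim_logits`, and `skill_idx` are illustrative
# names, not machina's internal API.
import numpy as np
import torch.nn.functional as F


def diayn_reward(discrim_logits, skill_idx, num_skills=4):
    # discrim_logits: discriminator output for one observation, shape (num_skills,)
    # skill_idx: index of the skill that generated this step
    log_q_z = F.log_softmax(discrim_logits, dim=-1)[skill_idx]  # log q(z | s)
    log_p_z = np.log(1.0 / num_skills)                          # uniform prior log p(z)
    return (log_q_z - log_p_z).item()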
def test_learning(self):
    pol_net = PolNet(self.env.ob_space, self.env.ac_space, h1=32, h2=32)
    pol = GaussianPol(self.env.ob_space, self.env.ac_space, pol_net)

    qf_net1 = QNet(self.env.ob_space, self.env.ac_space)
    qf1 = DeterministicSAVfunc(self.env.ob_space, self.env.ac_space, qf_net1)
    targ_qf_net1 = QNet(self.env.ob_space, self.env.ac_space)
    targ_qf_net1.load_state_dict(qf_net1.state_dict())
    targ_qf1 = DeterministicSAVfunc(
        self.env.ob_space, self.env.ac_space, targ_qf_net1)

    qf_net2 = QNet(self.env.ob_space, self.env.ac_space)
    qf2 = DeterministicSAVfunc(self.env.ob_space, self.env.ac_space, qf_net2)
    targ_qf_net2 = QNet(self.env.ob_space, self.env.ac_space)
    targ_qf_net2.load_state_dict(qf_net2.state_dict())
    targ_qf2 = DeterministicSAVfunc(
        self.env.ob_space, self.env.ac_space, targ_qf_net2)

    qfs = [qf1, qf2]
    targ_qfs = [targ_qf1, targ_qf2]

    log_alpha = nn.Parameter(torch.zeros(()))

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
    optim_qf1 = torch.optim.Adam(qf_net1.parameters(), 3e-4)
    optim_qf2 = torch.optim.Adam(qf_net2.parameters(), 3e-4)
    optim_qfs = [optim_qf1, optim_qf2]
    optim_alpha = torch.optim.Adam([log_alpha], 3e-4)

    epis = sampler.sample(pol, max_steps=32)

    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    traj.register_epis()

    result_dict = sac.train(
        traj,
        pol, qfs, targ_qfs, log_alpha,
        optim_pol, optim_qfs, optim_alpha,
        2, 32, 0.01, 0.99, 2,
    )

    del sampler
def test_learning(self):
    pol_net = PolNet(self.env.observation_space, self.env.action_space,
                     h1=32, h2=32, deterministic=True)
    noise = OUActionNoise(self.env.action_space)
    pol = DeterministicActionNoisePol(
        self.env.observation_space, self.env.action_space, pol_net, noise)
    targ_pol_net = PolNet(self.env.observation_space, self.env.action_space,
                          32, 32, deterministic=True)
    targ_pol_net.load_state_dict(pol_net.state_dict())
    targ_noise = OUActionNoise(self.env.action_space)
    targ_pol = DeterministicActionNoisePol(
        self.env.observation_space, self.env.action_space,
        targ_pol_net, targ_noise)

    qf_net = QNet(self.env.observation_space, self.env.action_space,
                  h1=32, h2=32)
    qf = DeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, qf_net)
    targ_qf_net = QNet(self.env.observation_space, self.env.action_space,
                       32, 32)
    # copy the online Q weights into the target Q network
    targ_qf_net.load_state_dict(qf_net.state_dict())
    targ_qf = DeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, targ_qf_net)

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
    optim_qf = torch.optim.Adam(qf_net.parameters(), 3e-4)

    epis = sampler.sample(pol, max_steps=32)

    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    traj.register_epis()

    result_dict = ddpg.train(
        traj, pol, targ_pol, qf, targ_qf,
        optim_pol, optim_qf, 1, 32, 0.01, 0.9)

    del sampler
def test_learning(self):
    qf_net = QNet(self.env.observation_space, self.env.action_space, 32, 32)
    lagged_qf_net = QNet(self.env.observation_space,
                         self.env.action_space, 32, 32)
    lagged_qf_net.load_state_dict(qf_net.state_dict())
    targ_qf1_net = QNet(self.env.observation_space,
                        self.env.action_space, 32, 32)
    targ_qf1_net.load_state_dict(qf_net.state_dict())
    targ_qf2_net = QNet(self.env.observation_space,
                        self.env.action_space, 32, 32)
    targ_qf2_net.load_state_dict(lagged_qf_net.state_dict())

    qf = DeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, qf_net)
    lagged_qf = DeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, lagged_qf_net)
    targ_qf1 = CEMDeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, targ_qf1_net,
        num_sampling=60, num_best_sampling=6, num_iter=2, multivari=False)
    targ_qf2 = DeterministicSAVfunc(
        self.env.observation_space, self.env.action_space, targ_qf2_net)

    pol = ArgmaxQfPol(self.env.observation_space, self.env.action_space,
                      targ_qf1, eps=0.2)

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_qf = torch.optim.Adam(qf_net.parameters(), 3e-4)

    epis = sampler.sample(pol, max_steps=32)

    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    traj.register_epis()

    result_dict = qtopt.train(
        traj, qf, lagged_qf, targ_qf1, targ_qf2,
        optim_qf, 1000, 32, 0.9999, 0.995, 'mse')

    del sampler
pol_net = PolNet(ob_space, ac_space, args.h1, args.h2, deterministic=True)
noise = OUActionNoise(ac_space)
pol = DeterministicActionNoisePol(ob_space, ac_space, pol_net, noise)
targ_pol_net = PolNet(ob_space, ac_space, args.h1, args.h2, deterministic=True)
targ_pol_net.load_state_dict(pol_net.state_dict())
targ_noise = OUActionNoise(ac_space)
targ_pol = DeterministicActionNoisePol(
    ob_space, ac_space, targ_pol_net, targ_noise)

qf_net = QNet(ob_space, ac_space, args.h1, args.h2)
qf = DeterministicSAVfunc(ob_space, ac_space, qf_net)
targ_qf_net = QNet(ob_space, ac_space, args.h1, args.h2)
targ_qf_net.load_state_dict(qf_net.state_dict())
targ_qf = DeterministicSAVfunc(ob_space, ac_space, targ_qf_net)

sampler = EpiSampler(env, pol, num_parallel=args.num_parallel, seed=args.seed)

optim_pol = torch.optim.Adam(pol_net.parameters(), args.pol_lr)
optim_qf = torch.optim.Adam(qf_net.parameters(), args.qf_lr)

off_traj = Traj(args.max_steps_off)

total_epi = 0
total_step = 0
max_rew = -1e6

while args.max_epis > total_epi:
    with measure('sample'):
set_device(device)

score_file = os.path.join(args.log, 'progress.csv')
logger.add_tabular_output(score_file)

env = GymEnv(args.env_name,
             log_dir=os.path.join(args.log, 'movie'),
             record_video=args.record)
env.env.seed(args.seed)

observation_space = env.observation_space
action_space = env.action_space

qf_net = QNet(observation_space, action_space, args.h1, args.h2)
lagged_qf_net = QNet(observation_space, action_space, args.h1, args.h2)
lagged_qf_net.load_state_dict(qf_net.state_dict())
targ_qf1_net = QNet(observation_space, action_space, args.h1, args.h2)
targ_qf1_net.load_state_dict(qf_net.state_dict())
targ_qf2_net = QNet(observation_space, action_space, args.h1, args.h2)
targ_qf2_net.load_state_dict(lagged_qf_net.state_dict())

qf = DeterministicSAVfunc(observation_space, action_space, qf_net,
                          data_parallel=args.data_parallel)
lagged_qf = DeterministicSAVfunc(observation_space, action_space,
                                 lagged_qf_net,
                                 data_parallel=args.data_parallel)
targ_qf1 = CEMDeterministicSAVfunc(observation_space, action_space,
                                   targ_qf1_net,
pol_net = PolNet(ob_space, ac_space)
pol = GaussianPol(ob_space, ac_space, pol_net,
                  data_parallel=args.data_parallel, parallel_dim=0)

qf_net1 = QNet(ob_space, ac_space)
qf1 = DeterministicSAVfunc(ob_space, ac_space, qf_net1,
                           data_parallel=args.data_parallel, parallel_dim=0)
targ_qf_net1 = QNet(ob_space, ac_space)
targ_qf_net1.load_state_dict(qf_net1.state_dict())
targ_qf1 = DeterministicSAVfunc(ob_space, ac_space, targ_qf_net1,
                                data_parallel=args.data_parallel,
                                parallel_dim=0)

qf_net2 = QNet(ob_space, ac_space)
qf2 = DeterministicSAVfunc(ob_space, ac_space, qf_net2,
                           data_parallel=args.data_parallel, parallel_dim=0)
targ_qf_net2 = QNet(ob_space, ac_space)
targ_qf_net2.load_state_dict(qf_net2.state_dict())
targ_qf2 = DeterministicSAVfunc(ob_space,
# Create the Gym environment
from pybullet_envs.bullet.racecarGymEnv import RacecarGymEnv
env = RacecarGymEnv(renders=False, isDiscrete=False)

# Observation and action spaces
observation_space = env.observation_space
action_space = env.action_space

# Q-network
qf_net = QNet(observation_space, action_space, args.h1, args.h2)
# Deterministic state-action value function: a thin wrapper that shapes qf_net's output
qf = DeterministicSAVfunc(
    observation_space, action_space, qf_net,
    data_parallel=args.data_parallel)

# target Q-network theta1
targ_qf1_net = QNet(observation_space, action_space, args.h1, args.h2)
targ_qf1_net.load_state_dict(qf_net.state_dict())  # copy the weights from qf_net
# Maximizes Q over actions with the Cross-Entropy Method (see the sketch below)
targ_qf1 = CEMDeterministicSAVfunc(
    observation_space, action_space, targ_qf1_net,
    num_sampling=args.num_sampling,
    num_best_sampling=args.num_best_sampling,
    num_iter=args.num_iter,
    multivari=args.multivari,
    data_parallel=args.data_parallel,
    save_memory=args.save_memory)

# lagged network
lagged_qf_net = QNet(observation_space, action_space, args.h1, args.h2)
lagged_qf_net.load_state_dict(qf_net.state_dict())  # copy the weights from qf_net
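# A minimal sketch of the Cross-Entropy Method used by CEMDeterministicSAVfunc
# above to approximate argmax_a Q(s, a): sample candidate actions from a
# Gaussian, keep the best-scoring "elite" candidates, refit the Gaussian to
# the elites, and repeat. The function and argument names here are
# illustrative stand-ins, not machina's internal implementation.
import numpy as np


def cem_argmax_action(q_func, obs, ac_dim,
                      num_sampling=60, num_best_sampling=6, num_iter=2):
    mean = np.zeros(ac_dim)
    std = np.ones(ac_dim)
    for _ in range(num_iter):
        # sample candidate actions around the current mean
        acs = np.random.normal(mean, std, size=(num_sampling, ac_dim))
        scores = np.array([q_func(obs, ac) for ac in acs])
        # keep the highest-scoring candidates ("elites")
        elite_idx = np.argsort(scores)[-num_best_sampling:]
        elites = acs[elite_idx]
        # refit the sampling distribution to the elites
        mean = elites.mean(axis=0)
        std = elites.std(axis=0) + 1e-6
    return mean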
def main():
    pygame.init()  # initialize pygame
    (w, h) = (480, 320)
    screen = pygame.display.set_mode((w, h), FULLSCREEN)  # window size
    pygame.display.set_caption("Sikamaru")  # window bar

    # initialization
    tx = 0
    ty = 0
    sika = Sikamaru((w / 2, h / 2))
    sleep_count = 5
    eat_mode = 100
    esa = Food()
    wait = True
    seed = 42

    # TODO: define the RL agent
    '''
    state : 4D (sikaposi, esaposi)
    action : 2D (-20,+20)^2
    SAC
    simple_net : 30,30
    '''
    np.random.seed(seed)
    torch.manual_seed(seed)

    low = np.zeros(4)
    high = w * np.ones(4)
    ob_space = gym.spaces.Box(low=low, high=high)
    ac_space = gym.spaces.Discrete(4)
    ac_dict = {
        0: np.array([-20, 0]),
        1: np.array([20, 0]),
        2: np.array([0, -20]),
        3: np.array([0, 20])
    }

    pol_net = PolNet(ob_space, ac_space)
    pol = CategoricalPol(ob_space, ac_space, pol_net)
    qf_net1 = QNet(ob_space, ac_space)
    qf1 = DeterministicSAVfunc(ob_space, ac_space, qf_net1)
    targ_qf_net1 = QNet(ob_space, ac_space)
    targ_qf_net1.load_state_dict(qf_net1.state_dict())
    targ_qf1 = DeterministicSAVfunc(ob_space, ac_space, targ_qf_net1)
    qf_net2 = QNet(ob_space, ac_space)
    qf2 = DeterministicSAVfunc(ob_space, ac_space, qf_net2)
    targ_qf_net2 = QNet(ob_space, ac_space)
    targ_qf_net2.load_state_dict(qf_net2.state_dict())
    targ_qf2 = DeterministicSAVfunc(ob_space, ac_space, targ_qf_net2)
    qfs = [qf1, qf2]
    targ_qfs = [targ_qf1, targ_qf2]
    log_alpha = nn.Parameter(torch.ones(()))

    optim_pol = torch.optim.Adam(pol_net.parameters(), 1e-4)
    optim_qf1 = torch.optim.Adam(qf_net1.parameters(), 3e-4)
    optim_qf2 = torch.optim.Adam(qf_net2.parameters(), 3e-4)
    optim_qfs = [optim_qf1, optim_qf2]
    optim_alpha = torch.optim.Adam([log_alpha], 1e-4)
    # off_traj = Traj()

    while True:
        screen.fill((0, 100, 0))  # background color

        # my procedure
        ## env
        obs = make_obs((tx, ty), sika.posi, w, h)
        ac_real, ac, a_i = pol.deterministic_ac_real(
            torch.tensor(obs, dtype=torch.float))
        # ac_real = ac_real.reshape(pol.ac_space.shape)
        a = rule_act((tx, ty), sika.posi)
        # a = ac_dict[int(ac_real)]
        nx = sika.posi[0] + a[0]
        nx = max(min(nx, w), 0)
        ny = sika.posi[1] + a[1]
        ny = max(min(ny, h), 0)
        sika.move((nx, ny))
        screen.blit(sika.get_im(), sika.rect)

        if esa.life:
            # RL
            # TODO: record the transition as an episode
            screen.blit(esa.im, esa.rect)  # draw the food
            rew = esa.life_step(sika)
            if rew > 0:
                sika.bigup()
            if esa.life == 0:
                pass  # TODO: add one episode and learn
            wait = False
        if wait:
            pygame.time.wait(500)
        wait = True

        pygame.display.update()  # refresh the screen

        ## event
        for event in pygame.event.get():
            if event.type == MOUSEBUTTONDOWN and event.button == 1:
                tx, ty = event.pos
                esa.set((tx, ty))
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    sys.exit()
            if event.type == QUIT:  # quit handling
                pygame.quit()
                sys.exit()