def dqn_argo(param_set: Parameter_Set, max_reward):
    # Create the agent
    network = Network(action_dim=2)
    target_network = Network(action_dim=2)
    agent = Agent(network=network,
                  target_network=target_network,
                  eps_start=param_set.eps_init,
                  eps_anneal=param_set.eps_anneal,
                  eps_min=param_set.eps_min,
                  lr=param_set.lr,
                  gamma=param_set.gamma)

    # Create the environment
    env = gym.make('CartPole-v0')
    replay_buffer = Replay_Buffer(param_set.cap)

    save_reward_list = [0] * REWARD_SAVE_EVALUATION_SIZE
    reward_list = [0] * REWARD_EVALUATION_SIZE

    # Collect data (how many episodes to play)
    for i in range(EPISODE_NUM):
        # Get the initial observation from the environment
        state = env.reset()
        done = False
        # Reset the episode reward
        episode_reward = 0

        # Play one full episode (the environment signals termination)
        while not done:
            if i > INIT_EXPLORATION:
                # Choose an action with the epsilon-greedy policy
                action = agent.get_action(state)
            else:
                action = env.action_space.sample()

            # Step the environment with the action and receive s', r and the done flag
            next_state, reward, done, info = env.step(action)
            # Accumulate the episode reward
            episode_reward += reward
            # Store the transition in the replay buffer
            replay_buffer.add(state, action, next_state, reward, done)
            # Move on: assign s_{t+1} to s_t
            state = next_state

            loss = tf.constant(0)
            if i > INIT_EXPLORATION:
                # Train the neural network on a minibatch
                sample = replay_buffer.sample(BATCH_SIZE)
                if sample:
                    loss = agent.update(sample)

        if i % param_set.q_update == 0:
            agent.network_synchronize()

        reward_list[i % REWARD_EVALUATION_SIZE] = episode_reward
        save_reward_list[i % REWARD_SAVE_EVALUATION_SIZE] = episode_reward

        if sum(save_reward_list) / len(save_reward_list) >= max_reward:
            print("New best score!!!")
            agent.save(SAVE_DIRECTORY + SAVE_FILE)
            max_reward = sum(save_reward_list) / len(save_reward_list)

    return sum(reward_list) / len(reward_list), max_reward
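For context, a minimal sketch of how dqn_argo might be driven from a small hyperparameter sweep. It assumes Parameter_Set is a plain container exposing exactly the fields the function reads (eps_init, eps_anneal, eps_min, lr, gamma, cap, q_update) and that the module-level constants (EPISODE_NUM, BATCH_SIZE, etc.) are already defined; the concrete values below are placeholders, not the original settings.

from dataclasses import dataclass

@dataclass
class Parameter_Set:
    # Fields assumed from the attributes dqn_argo reads; values are illustrative only.
    eps_init: float = 1.0
    eps_anneal: float = 0.001
    eps_min: float = 0.05
    lr: float = 1e-3
    gamma: float = 0.99
    cap: int = 10000
    q_update: int = 10

max_reward = 0.0
results = []
for lr in (1e-3, 5e-4):
    params = Parameter_Set(lr=lr)
    avg_reward, max_reward = dqn_argo(params, max_reward)
    results.append((lr, avg_reward))
print(results)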
class Train:
    def __init__(self, cfg):
        self.num_states = cfg.MODEL.SIZE_STATE
        self.num_actions = cfg.MODEL.SIZE_ACTION
        self.num_episodes = cfg.SOLVER.NUM_EPISODES
        self.tetris = Tetris(cfg)
        self.agent = Agent(cfg, self.tetris)

    def run(self):
        episode_10_list = np.zeros(10)
        episode_final = False
        reward_per_epoch = []
        lifetime_per_epoch = []

        for episode in range(self.num_episodes):
            self.tetris.init()
            brd, mino = self.tetris.get_state()
            observation = torch.tensor(np.append(brd.flatten(), mino))
            state = observation
            state = state.type(torch.FloatTensor)
            state = torch.unsqueeze(state, 0)

            # frames = [self.env.getScreenRGB()]
            frames = []  # frame capture is disabled (see the commented line above); kept so the gif branches below do not raise NameError
            cum_reward = 0
            t = 0
            step = 0

            if episode % 15 == 0:
                self.agent.update_target_model()

            while not self.tetris.check_dead():
                step += 1
                action = self.agent.get_action(state, mino, episode)
                self.tetris.update_state(action.squeeze())
                rew = self.tetris.score
                print(rew)
                t += 1

                brd, mino = self.tetris.get_state()
                observation_next = torch.tensor(np.append(brd.flatten(), mino))
                done = self.tetris.check_dead()
                # frames.append(self.env.getScreenRGB())

                # Assign the reward, check whether the episode is over, and set state_next
                if done:
                    state_next = None  # There is no next state, so store None
                    # Append the step count to the list for the last 10 episodes
                    episode_10_list = np.hstack((episode_10_list[1:], step + 1))
                    # Penalize the terminal transition
                    reward = torch.FloatTensor([-1.0])
                else:
                    if rew > 0:
                        reward = torch.FloatTensor([1.0])
                    else:
                        reward = torch.FloatTensor([0.0])
                    state_next = observation_next.type(torch.FloatTensor)
                    # state_next = torch.from_numpy(state_next).type(torch.FloatTensor)
                    state_next = torch.unsqueeze(state_next, 0)

                cum_reward += rew
                self.agent.memorize(state, action, state_next, reward)
                self.agent.update_q_network()
                state = state_next

                # End-of-episode handling
                if done:
                    print('%d Episode: Finished after %d steps: mean steps over the last 10 episodes = %.1f'
                          % (episode, step + 1, episode_10_list.mean()))
                    reward_per_epoch.append(cum_reward)
                    lifetime_per_epoch.append(step + 1)
                    break

            if episode_final is True:
                # Save and render the animation
                display_frames_as_gif(frames)
                break

            # Print a log every 50 episodes
            if episode % PRINT_EVERY_EPISODE == 0:
                print("Episode %d finished after %f time steps" % (episode, t))
                print("cumulated reward: %f" % cum_reward)

                # Create an animation every 100 episodes
                if episode % SHOW_GIF_EVERY_EPISODE == 0:
                    print("len frames:", len(frames))
                    display_frames_as_gif(frames)
                continue

            # Create an animation for episodes that lasted more than 2000 time steps
            if step > 2000:
                print("len frames:", len(frames))
                display_frames_as_gif(frames)


# Save the model (note: refers to a module-level `agent`)
def save_model():
    torch.save(agent.brain.model.state_dict(), 'weight.pth')
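A minimal sketch of how this class might be driven, assuming a nested config object that exposes the three fields read in __init__ (MODEL.SIZE_STATE, MODEL.SIZE_ACTION, SOLVER.NUM_EPISODES). The SimpleNamespace scaffolding and values are placeholders, not the project's actual config; in practice Tetris(cfg) and Agent(cfg, ...) will read additional fields.

from types import SimpleNamespace

# Hypothetical config exposing only the attributes Train.__init__ reads.
cfg = SimpleNamespace(
    MODEL=SimpleNamespace(SIZE_STATE=200, SIZE_ACTION=40),
    SOLVER=SimpleNamespace(NUM_EPISODES=500),
)

trainer = Train(cfg)
trainer.run()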
def train(sess, config):
    env = GymEnvironment(config)

    log_dir = './log/{}_lookahead_{}_gats_{}/'.format(config.env_name,
                                                      config.lookahead,
                                                      config.gats)
    checkpoint_dir = os.path.join(log_dir, 'checkpoints/')
    image_dir = os.path.join(log_dir, 'rollout/')
    if os.path.isdir(log_dir):
        shutil.rmtree(log_dir)
        print(' [*] Removed log dir: ' + log_dir)

    with tf.variable_scope('step'):
        step_op = tf.Variable(0, trainable=False, name='step')
        step_input = tf.placeholder('int32', None, name='step_input')
        step_assign_op = step_op.assign(step_input)

    with tf.variable_scope('summary'):
        scalar_summary_tags = [
            'average.reward', 'average.loss', 'average.q value',
            'episode.max reward', 'episode.min reward', 'episode.avg reward',
            'episode.num of game', 'training.learning_rate',
            'rp.rp_accuracy', 'rp.rp_plus_accuracy', 'rp.rp_minus_accuracy',
            'rp.nonzero_rp_accuracy'
        ]

        summary_placeholders = {}
        summary_ops = {}

        for tag in scalar_summary_tags:
            summary_placeholders[tag] = tf.placeholder('float32', None,
                                                       name=tag.replace(' ', '_'))
            summary_ops[tag] = tf.summary.scalar(
                "%s-%s/%s" % (config.env_name, config.env_type, tag),
                summary_placeholders[tag])

        histogram_summary_tags = ['episode.rewards', 'episode.actions']

        for tag in histogram_summary_tags:
            summary_placeholders[tag] = tf.placeholder('float32', None,
                                                       name=tag.replace(' ', '_'))
            summary_ops[tag] = tf.summary.histogram(tag, summary_placeholders[tag])

    config.num_actions = env.action_size
    # config.num_actions = 3
    exploration = LinearSchedule(config.epsilon_end_t, config.epsilon_end)

    agent = Agent(sess, config, num_actions=config.num_actions)

    if config.gats:
        lookahead = config.lookahead
        rp_train_frequency = 4
        gdm_train_frequency = 4
        gdm = GDM(sess, config, num_actions=config.num_actions)
        rp = RP(sess, config, num_actions=config.num_actions)
        leaves_size = config.num_actions ** config.lookahead
        if config.dyna:
            gan_memory = GANReplayMemory(config)
        else:
            gan_memory = None

        def base_generator():
            # Enumerate every action sequence of length `lookahead`:
            # row i is the base-`num_actions` representation of i, one action per depth.
            tree_base = np.zeros((leaves_size, lookahead)).astype('uint8')
            for i in range(leaves_size):
                n = i
                j = 0
                while n:
                    n, r = divmod(n, config.num_actions)
                    tree_base[i, lookahead - 1 - j] = r
                    j = j + 1
            return tree_base

        tree_base = base_generator()

    # memory = ReplayMemory(config)
    memory = ReplayMemory(config, log_dir)
    history = History(config)

    tf.global_variables_initializer().run()
    saver = tf.train.Saver(max_to_keep=30)

    # Load the model if a checkpoint exists.
    load_model(sess, saver, checkpoint_dir)
    agent.updated_target_q_network()

    writer = tf.summary.FileWriter(log_dir, sess.graph)

    num_game, update_count, ep_reward = 0, 0, 0.
    total_reward, total_loss, total_q_value = 0., 0., 0.
    max_avg_ep_reward = -100
    ep_rewards, actions = [], []
    rp_accuracy = []
    rp_plus_accuracy = []
    rp_minus_accuracy = []
    nonzero_rp_accuracy = []

    screen, reward, action, terminal = env.new_random_game()

    # Initialize the state history
    for _ in range(config.history_length):
        history.add(screen)

    start_step = step_op.eval()

    # Main loop
    for step in tqdm(range(start_step, config.max_step), ncols=70, initial=start_step):
        if step == config.learn_start:
            num_game, update_count, ep_reward = 0, 0, 0.
            total_reward, total_loss, total_q_value = 0., 0., 0.
            ep_rewards, actions = [], []
        if step == config.gan_dqn_learn_start:
            rp_accuracy = []
            rp_plus_accuracy = []
            rp_minus_accuracy = []
            nonzero_rp_accuracy = []

        # ε-greedy action selection
        MCTS_FLAG = False
        epsilon = exploration.value(step)
        if random.random() < epsilon:
            action = random.randrange(config.num_actions)
        else:
            current_state = norm_frame(np.expand_dims(history.get(), axis=0))
            if config.gats and (step >= config.gan_dqn_learn_start):
                action, predicted_reward = MCTS_planning(
                    gdm, rp, agent, current_state, leaves_size, tree_base,
                    config, exploration, step, gan_memory)
                MCTS_FLAG = True
            else:
                action = agent.get_action(
                    norm_frame_Q(unnorm_frame(current_state)))

        # For GATS?
        apply_action = action
        # if int(apply_action != 0):
        #     apply_action += 1

        # Observe
        screen, reward, terminal = env.act(apply_action, is_training=True)
        reward = max(config.min_reward, min(config.max_reward, reward))
        history.add(screen)
        memory.add(screen, reward, action, terminal)

        if MCTS_FLAG:
            rp_accuracy.append(int(predicted_reward == reward))
            if reward != 0:
                nonzero_rp_accuracy.append(int(predicted_reward == reward))
            if reward == 1:
                rp_plus_accuracy.append(int(predicted_reward == reward))
            elif reward == -1:
                rp_minus_accuracy.append(int(predicted_reward == reward))

        # Train
        if step > config.gan_learn_start and config.gats:
            if step % rp_train_frequency == 0 and memory.can_sample(config.rp_batch_size):
                obs, act, rew = memory.reward_sample(config.rp_batch_size)
                # obs, act, rew = memory.reward_sample2(
                #     config.rp_batch_size, config.lookahead)
                reward_obs, reward_act, reward_rew = memory.reward_sample(
                    config.nonzero_batch_size, nonzero=True)
                # reward_obs, reward_act, reward_rew = memory.nonzero_reward_sample(
                #     config.rp_batch_size, config.lookahead)
                obs_batch = norm_frame(np.concatenate((obs, reward_obs), axis=0))
                act_batch = np.concatenate((act, reward_act), axis=0)
                rew_batch = np.concatenate((rew, reward_rew), axis=0)
                reward_label = rew_batch + 1
                trajectories = gdm.get_state(obs_batch, act_batch[:, :-1])
                rp_summary = rp.train(trajectories, act_batch, reward_label)
                writer.add_summary(rp_summary, step)

            if step % gdm_train_frequency == 0 and memory.can_sample(config.gan_batch_size):
                state_batch, action_batch, next_state_batch = memory.GAN_sample()
                # state_batch, act_batch, next_state_batch = memory.GAN_sample2(
                #     config.gan_batch_size, config.lookahead)
                # gdm.summary, disc_summary, merged_summary = gdm.train(
                #     norm_frame(state_batch), act_batch, norm_frame(next_state_batch), warmup_bool)
                gdm.summary, disc_summary = gdm.train(
                    norm_frame(state_batch), action_batch, norm_frame(next_state_batch))

        if step > config.learn_start:
            # if step % config.train_frequency == 0 and memory.can_sample(config.batch_size):
            if step % config.train_frequency == 0:
                # s_t, act_batch, rew_batch, s_t_plus_1, terminal_batch = memory.sample(
                #     config.batch_size, config.lookahead)
                s_t, act_batch, rew_batch, s_t_plus_1, terminal_batch = memory.sample()
                s_t, s_t_plus_1 = norm_frame(s_t), norm_frame(s_t_plus_1)

                if config.gats and config.dyna:
                    if step > config.gan_dqn_learn_start and gan_memory.can_sample(config.batch_size):
                        gan_obs_batch, gan_act_batch, gan_rew_batch, gan_terminal_batch = gan_memory.sample()
                        # gan_obs_batch, gan_act_batch, gan_rew_batch = gan_memory.sample(
                        #     config.batch_size)
                        gan_obs_batch = norm_frame(gan_obs_batch)
                        trajectories = gdm.get_state(
                            gan_obs_batch, np.expand_dims(gan_act_batch, axis=1))
                        gan_next_obs_batch = trajectories[:, -config.history_length:, ...]
                        # gan_obs_batch, gan_next_obs_batch = \
                        #     norm_frame(gan_obs_batch), norm_frame(gan_next_obs_batch)
                        s_t = np.concatenate([s_t, gan_obs_batch], axis=0)
                        act_batch = np.concatenate([act_batch, gan_act_batch], axis=0)
                        rew_batch = np.concatenate([rew_batch, gan_rew_batch], axis=0)
                        s_t_plus_1 = np.concatenate([s_t_plus_1, gan_next_obs_batch], axis=0)
                        terminal_batch = np.concatenate([terminal_batch, gan_terminal_batch], axis=0)

                s_t, s_t_plus_1 = norm_frame_Q(unnorm_frame(s_t)), norm_frame_Q(unnorm_frame(s_t_plus_1))
                q_t, loss, dqn_summary = agent.train(s_t, act_batch, rew_batch,
                                                     s_t_plus_1, terminal_batch, step)
                writer.add_summary(dqn_summary, step)
                total_loss += loss
                total_q_value += q_t.mean()
                update_count += 1

            if step % config.target_q_update_step == config.target_q_update_step - 1:
                agent.updated_target_q_network()

        # Reinitialize when the episode ends
        if terminal:
            screen, reward, action, terminal = env.new_random_game()
            num_game += 1
            ep_rewards.append(ep_reward)
            ep_reward = 0.
        else:
            ep_reward += reward

        total_reward += reward

        # Change the training frequency
        if config.gats:
            if step == 10000 - 1:
                rp_train_frequency = 8
                gdm_train_frequency = 8
            if step == 50000 - 1:
                rp_train_frequency = 16
                gdm_train_frequency = 16
            if step == 100000 - 1:
                rp_train_frequency = 24
                gdm_train_frequency = 24

        # Run a rollout and save the images
        if config.gats and step % config._test_step == config._test_step - 1:
            rollout_image(config, image_dir, gdm, memory, step + 1, 16)

        # Calculate statistics
        if step >= config.learn_start:
            if step % config._test_step == config._test_step - 1:
                # Write the GDM/discriminator summaries
                if config.gats:
                    writer.add_summary(gdm.summary, step)
                    writer.add_summary(disc_summary, step)

                avg_reward = total_reward / config._test_step
                avg_loss = total_loss / update_count
                avg_q = total_q_value / update_count

                try:
                    max_ep_reward = np.max(ep_rewards)
                    min_ep_reward = np.min(ep_rewards)
                    avg_ep_reward = np.mean(ep_rewards)
                except:
                    max_ep_reward, min_ep_reward, avg_ep_reward = 0, 0, 0

                print('\navg_r: %.4f, avg_l: %.6f, avg_q: %3.6f, avg_ep_r: %.4f, max_ep_r: %.4f, min_ep_r: %.4f, # game: %d'
                      % (avg_reward, avg_loss, avg_q, avg_ep_reward,
                         max_ep_reward, min_ep_reward, num_game))

                # Save a checkpoint when the average episode reward improves
                if max_avg_ep_reward * 0.9 <= avg_ep_reward:
                    step_assign_op.eval({step_input: step + 1})
                    save_model(sess, saver, checkpoint_dir, step + 1)
                    max_avg_ep_reward = max(max_avg_ep_reward, avg_ep_reward)

                if step >= config.gan_dqn_learn_start:
                    if len(rp_accuracy) > 0:
                        rp_accuracy = np.mean(rp_accuracy)
                        rp_plus_accuracy = np.mean(rp_plus_accuracy)
                        rp_minus_accuracy = np.mean(rp_minus_accuracy)
                        nonzero_rp_accuracy = np.mean(nonzero_rp_accuracy)
                    else:
                        rp_accuracy = 0
                        rp_plus_accuracy = 0
                        rp_minus_accuracy = 0
                        nonzero_rp_accuracy = 0
                else:
                    rp_accuracy = 0
                    rp_plus_accuracy = 0
                    rp_minus_accuracy = 0
                    nonzero_rp_accuracy = 0

                # Write the scalar and histogram summaries
                if step > 180:
                    inject_summary(
                        sess, writer, summary_ops, summary_placeholders, {
                            'average.reward': avg_reward,
                            'average.loss': avg_loss,
                            'average.q value': avg_q,
                            'episode.max reward': max_ep_reward,
                            'episode.min reward': min_ep_reward,
                            'episode.avg reward': avg_ep_reward,
                            'episode.num of game': num_game,
                            'episode.rewards': ep_rewards,
                            'episode.actions': actions,
                            'rp.rp_accuracy': rp_accuracy,
                            'rp.rp_plus_accuracy': rp_plus_accuracy,
                            'rp.rp_minus_accuracy': rp_minus_accuracy,
                            'rp.nonzero_rp_accuracy': nonzero_rp_accuracy
                        }, step)

                num_game = 0
                total_reward = 0.
                total_loss = 0.
                total_q_value = 0.
                update_count = 0
                ep_reward = 0.
                ep_rewards = []
                actions = []
                rp_accuracy = []
                rp_plus_accuracy = []
                rp_minus_accuracy = []
                nonzero_rp_accuracy = []
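For reference, a small standalone sketch of what base_generator above produces: each row of tree_base is one action sequence of length lookahead, i.e. one leaf of the depth-lookahead planning tree, enumerated as the base-num_actions digits of the row index. The values here (2 actions, lookahead 3) are illustrative only; in train() they come from config.

import numpy as np

# Illustrative values only; in train() these are config.num_actions and config.lookahead.
num_actions = 2
lookahead = 3
leaves_size = num_actions ** lookahead

tree_base = np.zeros((leaves_size, lookahead)).astype('uint8')
for i in range(leaves_size):
    n, j = i, 0
    while n:
        n, r = divmod(n, num_actions)
        tree_base[i, lookahead - 1 - j] = r
        j += 1

print(tree_base)
# [[0 0 0]
#  [0 0 1]
#  [0 1 0]
#  [0 1 1]
#  [1 0 0]
#  [1 0 1]
#  [1 1 0]
#  [1 1 1]]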