def generate_comparison_finetune():
    # modify default args
    args = get_args()
    args.env_name = 'PusherEnv-v0'
    args.num_processes = 1
    args.num_steps = 1000
    args.num_env_steps = 301000
    args.save_interval = 1
    args.cuda = False

    args.algo = "ppo"
    scratch_rewards, scratch_times = train_ppo_from_scratch(args)
    args.algo = "ppo_fine_tune"
    vanilla_rewards, vanilla_times = train_ppo_fine_tune(args)
    args.algo = "ppo_fine_tune_jointloss"
    joint_rewards, joint_times = train_ppo_fine_tune_joint(args)

    plt.figure()
    plt.plot(scratch_times, scratch_rewards)
    plt.plot(vanilla_times, vanilla_rewards)
    plt.plot(joint_times, joint_rewards)
    plt.xlabel("Timesteps")
    plt.ylabel("Mean Episode Reward (10 episodes)")
    plt.legend(["PPO from scratch", "PPO with expert, vanilla",
                "PPO with expert, joint loss"])
    # Save before show: plt.show() can clear the current figure on some
    # backends, which would leave ppo_comparison.png blank.
    plt.savefig("ppo_comparison.png")
    plt.show()
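# The three train_* helpers above are defined elsewhere in this repo; the
# comparison plot only relies on them sharing one return convention. A
# minimal sketch of that assumed contract (names and values here are
# illustrative stand-ins, not the real trainers):
def train_stub(args):
    """Hypothetical trainer: returns (mean rewards, env timesteps) per log point."""
    rewards, times = [], []
    for update in range(3):  # stand-in for the real PPO update loop
        rewards.append(0.0)                          # mean episode reward at this point
        times.append((update + 1) * args.num_steps)  # env steps consumed so far
    return rewards, times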
def __init__(self, optional_args=None):
    start_time_str = datetime.now().strftime("%m-%d-%Y-%H-%M-%S")
    self.args = get_args(optional_args)
    args = self.args
    assert args.algo == 'ppo'
    self.config_parameters = ""
    if args.env_name == "Warehouse":
        self.config_parameters = read_parameters(
            'parameters', 'warehouse/' + args.yaml_file)
    elif "Breakout" in args.env_name:
        self.config_parameters = read_parameters(
            'parameters', 'atari/' + args.yaml_file)
    self.model_file_name = args.env_name + "_" + start_time_str + ".pt"
    self.data_saver = DataSaver(start_time_str)
    line = "Starting new run: with args " + str(args)
    self.data_saver.append(line)
    print(line)
    line = "And parameters: " + str(self.config_parameters)
    self.data_saver.append(line)
    print(line)
def test_main():
    logger = mp.log_to_stderr()
    logger.setLevel(logging.WARNING)
    args = get_args()
    datas = []
    # for i in range(0, args.num_env_steps // (args.num_steps * args.num_processes) + 1):
    # for i in range(0, 1):
    for i in [-1]:
        # print("i:", i)
        # args.reseed_step = i * args.num_steps * args.num_processes
        args.reseed_step = -1
        # args.guided_updates = i
        for z in range(0, 20):
            args.reseed_z = z + 1
            close_to = main(args, logger)
            print(i, z, close_to)
            datas.append((i, z, close_to))
    import joblib
    joblib.dump(datas, "{}-{}.data".format(args.test_branching_name, args.seed))
def save_trajectories_images(obs, acts, rews, eps):
    # Save images only
    args = get_args()
    obs_path = []
    acts = np.array(acts)
    rews = np.array(rews)
    eps = np.array(eps)
    print(acts.shape, rews.shape, eps.shape)
    # Get image dir to save
    save_dir = os.path.join(
        args.save_dir,
        args.load_model_name,
    )
    image_dir = os.path.join(save_dir, 'images')
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(image_dir, exist_ok=True)
    image_id = 0
    for ob in obs:
        path = os.path.join(image_dir, str(image_id))
        # Scale the image from [0, 1] floats to [0, 255] uint8, channels-last
        obimg = (ob * 255).astype(np.uint8).transpose(1, 2, 0)  # [H, W, C]
        # Save image and record its path; note that np.save appends '.npy',
        # so readers must re-append that extension when loading from obs_path.
        np.save(path, obimg)
        obs_path.append(path)
        image_id += 1
    expert_dict = {
        'obs': obs_path,
        'actions': acts,
        'rewards': rews,
        'episode_starts': eps,
    }
    torch.save(expert_dict, os.path.join(save_dir, 'expert_data.pkl'))
    print("Saved")
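# For reference, a minimal sketch of reading the archive written above. It
# assumes only what save_trajectories_images guarantees: a torch-saved dict
# of paths and arrays, with each observation stored as '<path>.npy'.
def load_expert_images(save_dir):
    expert = torch.load(os.path.join(save_dir, 'expert_data.pkl'))
    # np.save added the '.npy' extension, so re-append it here
    obs = [np.load(p + '.npy') for p in expert['obs']]  # each [H, W, C] uint8
    return obs, expert['actions'], expert['rewards'], expert['episode_starts']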
def main():
    args = get_args()
    load = False
    if args.load_time_label is not None:
        load_time_label = args.load_time_label
        load_path = os.path.join('trained_models', args.load_time_label)
        args = np.load(os.path.join(load_path, 'args.npy'),
                       allow_pickle=True)[0]
        args.total_step = 1e8
        load = True
    else:
        print('error: no load_time_label')
        exit(1)
    save_path = os.path.join(args.save_dir, load_time_label)
    save_path = os.path.join(save_path, 'gif')
    try:
        os.makedirs(save_path)
    except OSError:
        pass
    out_file = open(os.path.join(save_path, "gif_out.txt"), "w")

    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    torch.set_num_threads(8)
    device = torch.device("cuda" if args.cuda else "cpu")
    set_v('device', device)

    n_agent = args.num_controlled_agent
    # map the checkpoint onto the device selected above
    agent = torch.load(os.path.join(load_path, 'agent.pt'),
                       map_location=device)
    evaluate(args, agent, None, None, args.seed, args.num_processes, None,
             device, n_agent, out_file)
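# The loader above expects 'args.npy' to hold a one-element object array
# wrapping the argparse Namespace. A minimal sketch of the matching save
# side (the real training script may differ):
def save_run_args(args, load_path):
    # round-trips via np.load(..., allow_pickle=True)[0]
    np.save(os.path.join(load_path, 'args.npy'),
            np.array([args], dtype=object))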
def record_trajectories():
    args = get_args()
    print(args)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    # Append the model name
    log_dir = os.path.expanduser(args.log_dir)
    log_dir = os.path.join(log_dir, args.model_name, str(args.seed))
    eval_log_dir = log_dir + "_eval"
    utils.cleanup_log_dir(log_dir)
    utils.cleanup_log_dir(eval_log_dir)

    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    envs = make_vec_envs(args.env_name, args.seed, 1, args.gamma, log_dir,
                         device, True, training=False)

    # Take activation for carracing
    print("Loaded env...")
    activation = None
    if args.env_name == 'CarRacing-v0' and args.use_activation:
        activation = torch.tanh
    print(activation)

    actor_critic = Policy(
        envs.observation_space.shape,
        envs.action_space,
        base_kwargs={
            'recurrent': args.recurrent_policy,
            'env': args.env_name
        },
        activation=activation,
    )
    actor_critic.to(device)

    # Load from previous model
    if args.load_model_name:
        loaddata = torch.load(
            os.path.join(args.save_dir, args.load_model_name,
                         args.load_model_name + '_{}.pt'.format(args.seed)))
        state = loaddata[0]
        try:
            obs_rms, ret_rms = loaddata[1:]
            # Feed it into the env
            envs.obs_rms = None
            envs.ret_rms = None
        except ValueError:
            print("Couldn't load obs_rms")
            obs_rms = ret_rms = None
        try:
            actor_critic.load_state_dict(state)
        except Exception:
            # older checkpoints stored the whole module, not a state dict
            actor_critic = state
    else:
        raise NotImplementedError

    # Record trajectories
    actions = []
    rewards = []
    observations = []
    episode_starts = []
    for eps in range(args.num_episodes):
        obs = envs.reset()
        # Init variables for storing
        episode_starts.append(True)
        reward = 0
        while True:
            # Take action
            act = actor_critic.act(obs, None, None, None)[1]
            next_state, rew, done, info = envs.step(act)
            # print(obs.shape, act.shape, rew.shape, done)
            reward += rew

            # Add the current observation and act
            observations.append(obs.data.cpu().numpy()[0])  # [C, H, W]
            actions.append(act.data.cpu().numpy()[0])  # [A]
            rewards.append(rew[0, 0].data.cpu().numpy())
            if done[0]:
                break
            episode_starts.append(False)
            obs = next_state + 0  # '+ 0' forces a copy of the tensor
        print("Total reward: {}".format(reward[0, 0].data.cpu().numpy()))
    # Save these values
    save_trajectories_images(observations, actions, rewards, episode_starts)
def main(): args = get_args() # Record trajectories if args.record_trajectories: record_trajectories() return print(args) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True # Append the model name log_dir = os.path.expanduser(args.log_dir) log_dir = os.path.join(log_dir, args.model_name, str(args.seed)) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, log_dir, device, False) # Take activation for carracing print("Loaded env...") activation = None if args.env_name == 'CarRacing-v0' and args.use_activation: activation = torch.tanh print(activation) actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={ 'recurrent': args.recurrent_policy, 'env': args.env_name }, activation=activation) actor_critic.to(device) # Load from previous model if args.load_model_name: state = torch.load( os.path.join(args.save_dir, args.load_model_name, args.load_model_name + '_{}.pt'.format(args.seed)))[0] try: actor_critic.load_state_dict(state) except: actor_critic = state if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: if len(envs.observation_space.shape) == 1: discr = gail.Discriminator( envs.observation_space.shape[0] + envs.action_space.shape[0], 100, device) file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format(args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset(file_name, num_trajectories=3, subsample_frequency=1) expert_dataset_test = gail.ExpertDataset(file_name, num_trajectories=1, start=3, subsample_frequency=1) drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) gail_test_loader = torch.utils.data.DataLoader( dataset=expert_dataset_test, batch_size=args.gail_batch_size, shuffle=False, drop_last=False) print(len(expert_dataset), len(expert_dataset_test)) else: # env observation shape is 3 => its an image assert len(envs.observation_space.shape) == 3 discr = gail.CNNDiscriminator(envs.observation_space.shape, envs.action_space, 100, device) file_name = os.path.join(args.gail_experts_dir, 'expert_data.pkl') expert_dataset = gail.ExpertImageDataset(file_name, train=True) test_dataset = gail.ExpertImageDataset(file_name, train=False) gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=len(expert_dataset) > args.gail_batch_size, ) gail_test_loader = torch.utils.data.DataLoader( dataset=test_dataset, batch_size=args.gail_batch_size, shuffle=False, drop_last=len(test_dataset) > args.gail_batch_size, ) print('Dataloader size', len(gail_train_loader)) rollouts = RolloutStorage(args.num_steps, 
args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() #num_updates = int( #args.num_env_steps) // args.num_steps // args.num_processes num_updates = args.num_steps print(num_updates) # count the number of times validation loss increases val_loss_increase = 0 prev_val_action = np.inf best_val_loss = np.inf for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Observe reward and next obs obs, reward, done, infos = envs.step(action) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() if args.gail: if j >= 10: try: envs.venv.eval() except: pass gail_epoch = args.gail_epoch #if j < 10: #gail_epoch = 100 # Warm up for _ in range(gail_epoch): #discr.update(gail_train_loader, rollouts, #None) pass for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) #value_loss, action_loss, dist_entropy = agent.update(rollouts) value_loss = 0 dist_entropy = 0 for data in gail_train_loader: expert_states, expert_actions = data expert_states = Variable(expert_states).to(device) expert_actions = Variable(expert_actions).to(device) loss = agent.update_bc(expert_states, expert_actions) action_loss = loss.data.cpu().numpy() print("Epoch: {}, Loss: {}".format(j, action_loss)) with torch.no_grad(): cnt = 0 val_action_loss = 0 for data in gail_test_loader: expert_states, expert_actions = data expert_states = Variable(expert_states).to(device) expert_actions = Variable(expert_actions).to(device) loss = agent.get_action_loss(expert_states, expert_actions) val_action_loss += loss.data.cpu().numpy() cnt += 1 val_action_loss /= cnt print("Val Loss: {}".format(val_action_loss)) #rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": if val_action_loss < best_val_loss: val_loss_increase = 0 best_val_loss = val_action_loss save_path = os.path.join(args.save_dir, args.model_name) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic.state_dict(), getattr(utils.get_vec_normalize(envs), 'ob_rms', None), getattr(utils.get_vec_normalize(envs), 'ret_rms', None) ], os.path.join( save_path, args.model_name + "_{}.pt".format(args.seed))) elif val_action_loss > prev_val_action: val_loss_increase += 1 if val_loss_increase == 10: 
print("Val loss increasing too much, breaking here...") break elif val_action_loss < prev_val_action: val_loss_increase = 0 # Update prev val action prev_val_action = val_action_loss # log interval if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
def main():
    args = get_args()
    torch.set_num_threads(1)
    # device = torch.device("cuda:0" if args.cuda else "cpu")
    device = torch.device("cpu")

    # args.env_name = 'Pong-ramNoFrameskip-v4'
    args.env_name = 'Pong-ram-v0'
    args.num_processes = 2
    envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
                         args.gamma, args.log_dir, device, False)

    actor_critic = Policy(envs.observation_space.shape,
                          envs.action_space,
                          base_kwargs={'recurrent': args.recurrent_policy})
    actor_critic.to(device)

    print(args.recurrent_policy)
    print(args.clip_param)
    print(args.ppo_epoch)
    print(args.num_mini_batch)
    print(args.value_loss_coef)
    print(args.entropy_coef)
    print(args.lr)
    print(args.eps)
    print(args.max_grad_norm)
    # ss('in main, after actor_critic')  # ss() is a local debug pause; left disabled

    args.num_mini_batch = 2
    agent = algo.PPO(actor_critic,
                     args.clip_param,
                     args.ppo_epoch,
                     args.num_mini_batch,
                     args.value_loss_coef,
                     args.entropy_coef,
                     lr=args.lr,
                     eps=args.eps,
                     max_grad_norm=args.max_grad_norm)

    args.num_steps = 4
    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape, envs.action_space,
                              actor_critic.recurrent_hidden_state_size)

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    episode_rewards = deque(maxlen=10)

    start = time.time()
    num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes

    # per-process running reward, reset whenever that process finishes an episode
    sum_re = torch.zeros(args.num_processes, 1)

    for j in range(num_updates):
        is_any_done = False
        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step], rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])

            # Observe reward and next obs
            obs, reward, done, infos = envs.step(action)
            sum_re += reward

            if any(done):
                # is_any_done = True
                for i in range(len(done)):
                    if done[i]:
                        episode_rewards.append(sum_re[i].item())
                        sum_re[i] *= 0

            for info in infos:
                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])
                    print('what env info with episode do?', info.keys())

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            bad_masks = torch.FloatTensor(
                [[0.0] if 'bad_transition' in info.keys() else [1.0]
                 for info in infos])
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, reward, masks, bad_masks)

        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1]).detach()

        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.gae_lambda, is_any_done,
                                 args.use_proper_time_limits)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)

        rollouts.after_update()

        # save for every interval-th episode or for the last epoch
        # if (j % args.save_interval == 0
        #         or j == num_updates - 1) and args.save_dir != "":
        #     save_path = os.path.join(args.save_dir, args.algo)
        #     try:
        #         os.makedirs(save_path)
        #     except OSError:
        #         pass
        #     torch.save([
        #         actor_critic,
        #         getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
        #     ], os.path.join(save_path, args.env_name + ".pt"))

        args.log_interval = 100
        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            end = time.time()
            print(
                "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: "
                "mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n "
                "Ent {}, V {}, A {}".format(
                    j, total_num_steps, int(total_num_steps / (end - start)),
                    len(episode_rewards), np.mean(episode_rewards),
                    np.median(episode_rewards), np.min(episode_rewards),
                    np.max(episode_rewards), dist_entropy, value_loss,
                    action_loss))
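# Why the masks matter: compute_returns bootstraps V(s_{t+1}) across step
# boundaries, and a 0.0 mask cuts that bootstrap at episode ends. A minimal
# sketch of the recursion (simplified: no GAE, and assuming masks[t] is 0
# when step t ended an episode; the repo's RolloutStorage offsets masks by one):
def discounted_returns(rewards, masks, next_value, gamma):
    returns = [None] * len(rewards)
    running = next_value  # value of the state after the final step
    for t in reversed(range(len(rewards))):
        # masks[t] == 0 at terminal steps, so the return restarts there
        running = rewards[t] + gamma * masks[t] * running
        returns[t] = running
    return returns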
def main():
    args = get_args()

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    # logdir = os.path.join(os.path.expanduser(args.log_dir), 'runs')
    logdir = os.path.expanduser(args.log_dir)

    # Ugly but simple logging
    log_dict = {
        'task_steps': args.task_steps,
        'grad_noise_ratio': args.grad_noise_ratio,
        'max_task_grad_norm': args.max_task_grad_norm,
        'use_noisygrad': args.use_noisygrad,
        'use_pcgrad': args.use_pcgrad,
        'use_privacy': args.use_privacy,
        'seed': args.seed,
        'cmd': ' '.join(sys.argv[1:])
    }
    for eval_disp_name, eval_env_name in EVAL_ENVS.items():
        log_dict[eval_disp_name] = []

    # if (args.eval_interval is not None and len(episode_rewards) > 1
    #         and j % args.eval_interval == 0):
    #     actor_critic.eval()
    #     obs_rms = utils.get_vec_normalize(envs).obs_rms
    #     eval_r = {}
    #     for eval_disp_name, eval_env_name in EVAL_ENVS.items():
    #         print(eval_disp_name)
    #         eval_r[eval_disp_name] = evaluate(
    #             actor_critic, obs_rms, eval_env_name, args.seed,
    #             args.num_processes, logdir, device, steps=args.task_steps)
    #         summary_writer.add_scalar(
    #             f'eval/{eval_disp_name}', eval_r[eval_disp_name],
    #             (j + 1) * args.num_processes * args.num_steps)
    #         log_dict[eval_disp_name].append(
    #             [(j + 1) * args.num_processes * args.num_steps,
    #              eval_r[eval_disp_name]])
    #     summary_writer.add_scalars('eval_combined', eval_r,
    #                                (j + 1) * args.num_processes * args.num_steps)
    #     actor_critic.train()

    # res_search = [
    #     [{'use_noisygrad': False, 'task_steps': 20}, 'baseline'],
    #     [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.7}, 'noise=1.7'],
    #     [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.5}, 'noise=1.5'],
    #     [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.0}, 'noise=1.0'],
    # ]
    res_search = [
        # [{'use_noisygrad': False, 'use_privacy': False, 'use_pcgrad': False,
        #   'seed': 2, 'task_steps': 20}, 'baseline'],
        # [{'use_privacy': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 1.0}, 'privacy=1.0'],
        # [{'use_privacy': True, 'task_steps': 20, 'grad_noise_ratio': 0.2,
        #   'max_task_grad_norm': 1.0}, 'privacy=0.2'],
        [{
            'use_testgrad': True,
            'task_steps': 20,
            'grad_noise_ratio': 0.03
        }, 'testgrad 0.03'],
        # [{'use_testgrad': True, 'task_steps': 20, 'grad_noise_ratio': 0.05}, 'testgrad 0.05'],
        [{
            'use_testgrad': True,
            'task_steps': 20,
            'grad_noise_ratio': 1.0
        }, 'testgrad 1.0'],
        # [{'use_testgrad': True, 'task_steps': 20, 'grad_noise_ratio': 0.1}, 'testgrad 0.1'],
        [{
            'use_testgrad': False,
            'task_steps': 20
        }, 'baseline'],
        # [{'use_pcgrad': True, 'task_steps': 20}, 'testgrad_'],
        # [{'use_privacy': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 3.0}, 'privacy=1.0 grad norm 3.0'],
        # [{'use_pcgrad': True, 'task_steps': 20}, 'pcgrad=1.0'],
        # [{'use_privacy': True, 'task_steps': 20, 'grad_noise_ratio': 1.3}, 'privacy=1.3'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 1.0}, 'noise=1.0 norm 1.0'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 0.5}, 'noise=1.0 norm 0.5'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 0.75}, 'noise=1.0 norm 0.75'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.0,
        #   'max_task_grad_norm': 0.2}, 'noise=1.0 norm 0.2'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.3,
        #   'max_task_grad_norm': 1.0}, 'noise=1.3'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.5,
        #   'max_task_grad_norm': 1.0}, 'noise=1.5'],
        # [{'use_noisygrad': True, 'task_steps': 20, 'grad_noise_ratio': 1.7,
        #   'max_task_grad_norm': 1.0}, 'noise=1.7'],
    ]

    for s in res_search:
        res_many = []
        res_five = []
        res_type = []
        for subdir, dirs, files in os.walk(logdir):
            for name in dirs:
                load_name = os.path.join(logdir, name, 'log_dict.pkl')
                try:
                    log_dict = load_obj(load_name)
                except Exception:
                    continue
                is_match = True
                for key, val in s[0].items():
                    if log_dict[key] != val:
                        is_match = False
                if is_match:
                    res_many.append(log_dict['many_arms'])
                    res_five.append(log_dict['five_arms'])
                    res_type.append(log_dict['use_noisygrad'])
        if len(res_many) > 0:
            res_many = np.array(res_many)
            res_five = np.array(res_five)
            t = res_many[0, :, 0]
            res_many_mean = np.mean(res_many[:, :, 1], axis=0)
            res_many_std = np.std(res_many[:, :, 1], axis=0)
            res_five_mean = np.mean(res_five[:, :, 1], axis=0)
            res_five_std = np.std(res_five[:, :, 1], axis=0)
            # plt.errorbar(t, res_many_mean, res_many_std, label=s[1])
            # plt.errorbar(t, res_five_mean, res_five_std, label=s[1])
            for i in range(res_many.shape[0]):
                # plt.plot(res_many[i, :, 0], res_many[i, :, 1], label=s[1])
                plt.plot(res_five[i, :, 0], res_five[i, :, 1], label=s[1])
    plt.legend()
    plt.show()
    import pdb
    pdb.set_trace()
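# load_obj/save_obj are assumed pickle helpers defined elsewhere in this
# repo; a minimal sketch of the interface this script relies on:
import pickle

def save_obj(obj, path):
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def load_obj(path):
    with open(path, 'rb') as f:
        return pickle.load(f)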
def main(): args = get_args() import random random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) np.random.seed(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True logdir = args.env_name + '_' + args.algo + '_num_arms_' + str( args.num_processes) + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") if args.use_privacy: logdir = logdir + '_privacy' elif args.use_noisygrad: logdir = logdir + '_noisygrad' elif args.use_pcgrad: logdir = logdir + '_pcgrad' elif args.use_testgrad: logdir = logdir + '_testgrad' elif args.use_median_grad: logdir = logdir + '_mediangrad' logdir = os.path.join('runs', logdir) logdir = os.path.join(os.path.expanduser(args.log_dir), logdir) utils.cleanup_log_dir(logdir) # Ugly but simple logging log_dict = { 'task_steps': args.task_steps, 'grad_noise_ratio': args.grad_noise_ratio, 'max_task_grad_norm': args.max_task_grad_norm, 'use_noisygrad': args.use_noisygrad, 'use_pcgrad': args.use_pcgrad, 'use_testgrad': args.use_testgrad, 'use_testgrad_median': args.use_testgrad_median, 'testgrad_quantile': args.testgrad_quantile, 'median_grad': args.use_median_grad, 'use_meanvargrad': args.use_meanvargrad, 'meanvar_beta': args.meanvar_beta, 'no_special_grad_for_critic': args.no_special_grad_for_critic, 'use_privacy': args.use_privacy, 'seed': args.seed, 'recurrent': args.recurrent_policy, 'obs_recurrent': args.obs_recurrent, 'cmd': ' '.join(sys.argv[1:]) } for eval_disp_name, eval_env_name in EVAL_ENVS.items(): log_dict[eval_disp_name] = [] summary_writer = SummaryWriter() summary_writer.add_hparams( { 'task_steps': args.task_steps, 'grad_noise_ratio': args.grad_noise_ratio, 'max_task_grad_norm': args.max_task_grad_norm, 'use_noisygrad': args.use_noisygrad, 'use_pcgrad': args.use_pcgrad, 'use_testgrad': args.use_testgrad, 'use_testgrad_median': args.use_testgrad_median, 'testgrad_quantile': args.testgrad_quantile, 'median_grad': args.use_median_grad, 'use_meanvargrad': args.use_meanvargrad, 'meanvar_beta': args.meanvar_beta, 'no_special_grad_for_critic': args.no_special_grad_for_critic, 'use_privacy': args.use_privacy, 'seed': args.seed, 'recurrent': args.recurrent_policy, 'obs_recurrent': args.obs_recurrent, 'cmd': ' '.join(sys.argv[1:]) }, {}) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") print('making envs...') envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False, steps=args.task_steps, free_exploration=args.free_exploration, recurrent=args.recurrent_policy, obs_recurrent=args.obs_recurrent, multi_task=True) val_envs = make_vec_envs(args.val_env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False, steps=args.task_steps, free_exploration=args.free_exploration, recurrent=args.recurrent_policy, obs_recurrent=args.obs_recurrent, multi_task=True) eval_envs_dic = {} for eval_disp_name, eval_env_name in EVAL_ENVS.items(): eval_envs_dic[eval_disp_name] = make_vec_envs( eval_env_name[0], args.seed, args.num_processes, None, logdir, device, True, steps=args.task_steps, recurrent=args.recurrent_policy, obs_recurrent=args.obs_recurrent, multi_task=True, free_exploration=args.free_exploration) prev_eval_r = {} print('done') if args.hard_attn: actor_critic = Policy(envs.observation_space.shape, envs.action_space, base=MLPHardAttnBase, base_kwargs={ 'recurrent': args.recurrent_policy or args.obs_recurrent }) else: actor_critic = 
Policy(envs.observation_space.shape,
                              envs.action_space,
                              base=MLPAttnBase,
                              base_kwargs={
                                  'recurrent': args.recurrent_policy
                                  or args.obs_recurrent
                              })
    actor_critic.to(device)

    if (args.continue_from_epoch > 0) and args.save_dir != "":
        save_path = os.path.join(args.save_dir, args.algo)
        actor_critic_, loaded_obs_rms_ = torch.load(
            os.path.join(
                save_path, args.env_name +
                "-epoch-{}.pt".format(args.continue_from_epoch)))
        actor_critic.load_state_dict(actor_critic_.state_dict())

    if args.algo != 'ppo':
        raise ValueError("only PPO is supported")

    agent = algo.PPO(actor_critic,
                     args.clip_param,
                     args.ppo_epoch,
                     args.num_mini_batch,
                     args.value_loss_coef,
                     args.entropy_coef,
                     lr=args.lr,
                     eps=args.eps,
                     num_tasks=args.num_processes,
                     attention_policy=False,
                     max_grad_norm=args.max_grad_norm,
                     weight_decay=args.weight_decay)
    val_agent = algo.PPO(actor_critic,
                         args.clip_param,
                         args.ppo_epoch,
                         args.num_mini_batch,
                         args.value_loss_coef,
                         args.entropy_coef,
                         lr=args.val_lr,
                         eps=args.eps,
                         num_tasks=args.num_processes,
                         attention_policy=True,
                         max_grad_norm=args.max_grad_norm,
                         weight_decay=args.weight_decay)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape, envs.action_space,
                              actor_critic.recurrent_hidden_state_size)
    val_rollouts = RolloutStorage(args.num_steps, args.num_processes,
                                  val_envs.observation_space.shape,
                                  val_envs.action_space,
                                  actor_critic.recurrent_hidden_state_size)

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    val_obs = val_envs.reset()
    val_rollouts.obs[0].copy_(val_obs)
    val_rollouts.to(device)

    episode_rewards = deque(maxlen=10)

    start = time.time()
    num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes

    save_copy = True
    for j in range(args.continue_from_epoch,
                   args.continue_from_epoch + num_updates):

        # policy rollouts
        for step in range(args.num_steps):
            # Sample actions
            actor_critic.eval()
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step], rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])
            actor_critic.train()

            # Observe reward and next obs
            obs, reward, done, infos = envs.step(action)

            for info in infos:
                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])
                    for k, v in info['episode'].items():
                        summary_writer.add_scalar(
                            f'training/{k}', v,
                            j * args.num_processes * args.num_steps +
                            args.num_processes * step)

            # If done then clean the history of observations.
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) actor_critic.eval() with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() actor_critic.train() rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) if save_copy: prev_weights = copy.deepcopy(actor_critic.state_dict()) prev_opt_state = copy.deepcopy(agent.optimizer.state_dict()) prev_val_opt_state = copy.deepcopy( val_agent.optimizer.state_dict()) save_copy = False value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() # validation rollouts for val_iter in range(args.val_agent_steps): for step in range(args.num_steps): # Sample actions actor_critic.eval() with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( val_rollouts.obs[step], val_rollouts.recurrent_hidden_states[step], val_rollouts.masks[step]) actor_critic.train() # Obser reward and next obs obs, reward, done, infos = val_envs.step(action) # If done then clean the history of observations. masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) val_rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) actor_critic.eval() with torch.no_grad(): next_value = actor_critic.get_value( val_rollouts.obs[-1], val_rollouts.recurrent_hidden_states[-1], val_rollouts.masks[-1]).detach() actor_critic.train() val_rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) val_value_loss, val_action_loss, val_dist_entropy = val_agent.update( val_rollouts) val_rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'obs_rms', None) ], os.path.join(save_path, args.env_name + "-epoch-{}.pt".format(j))) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) revert = False if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): actor_critic.eval() obs_rms = utils.get_vec_normalize(envs).obs_rms eval_r = {} printout = f'Seed {args.seed} Iter {j} ' for eval_disp_name, eval_env_name in EVAL_ENVS.items(): eval_r[eval_disp_name] = evaluate( actor_critic, obs_rms, eval_envs_dic, eval_disp_name, args.seed, args.num_processes, eval_env_name[1], logdir, device, steps=args.task_steps, recurrent=args.recurrent_policy, obs_recurrent=args.obs_recurrent, multi_task=True, 
free_exploration=args.free_exploration) if eval_disp_name in prev_eval_r: diff = np.array(eval_r[eval_disp_name]) - np.array( prev_eval_r[eval_disp_name]) if eval_disp_name == 'many_arms': if np.sum(diff > 0) - np.sum( diff < 0) < args.val_improvement_threshold: print('no update') revert = True summary_writer.add_scalar(f'eval/{eval_disp_name}', np.mean(eval_r[eval_disp_name]), (j + 1) * args.num_processes * args.num_steps) log_dict[eval_disp_name].append([ (j + 1) * args.num_processes * args.num_steps, eval_r[eval_disp_name] ]) printout += eval_disp_name + ' ' + str( np.mean(eval_r[eval_disp_name])) + ' ' # summary_writer.add_scalars('eval_combined', eval_r, (j+1) * args.num_processes * args.num_steps) if revert: actor_critic.load_state_dict(prev_weights) agent.optimizer.load_state_dict(prev_opt_state) val_agent.optimizer.load_state_dict(prev_val_opt_state) else: print(printout) prev_eval_r = eval_r.copy() save_copy = True actor_critic.train() save_obj(log_dict, os.path.join(logdir, 'log_dict.pkl')) envs.close() val_envs.close() for eval_disp_name, eval_env_name in EVAL_ENVS.items(): eval_envs_dic[eval_disp_name].close()
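# The save_copy/revert logic above is a "checkpoint, then roll back on
# validation regression" scheme. The bare pattern in isolation (illustrative
# names; assumes `copy` is imported as in the training loop above):
def maybe_revert(model, optimizer, improved, snapshot):
    if improved or snapshot is None:
        # keep going; take a fresh snapshot of weights and optimizer state
        return {'model': copy.deepcopy(model.state_dict()),
                'opt': copy.deepcopy(optimizer.state_dict())}
    # regression: restore the last snapshot
    model.load_state_dict(snapshot['model'])
    optimizer.load_state_dict(snapshot['opt'])
    return snapshot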
action_loss)) episode_reward_means.append(np.mean(episode_rewards)) episode_reward_times.append(total_num_steps) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device) print(episode_reward_means, episode_reward_times) return episode_reward_means, episode_reward_times if __name__ == "__main__": # register environment gym.register(id='PusherEnv-v0', entry_point='pusher_goal:PusherEnv', kwargs={}) # modify default args args = get_args() args.env_name = 'PusherEnv-v0' args.algo = "ppo" args.num_processes = 1 args.num_steps = 1000 args.cuda = True train_ppo_from_scratch(args)
def main():
    torch.set_printoptions(precision=5, sci_mode=False)
    args = get_args()
    _make_env = partial(make_env, args.env_name, args.episode_steps,
                        args.env_config)
    env = _make_env()
    agents = []
    for i, agent in enumerate(env.agents):
        obs_space = env.observation_spaces[agent]
        act_space = env.action_spaces[agent]
        input_structure = env.input_structures[agent]
        actor_critic, _ = get_agent(agent, args, obs_space, input_structure,
                                    act_space, None,
                                    n_ref=args.num_refs[i], is_ref=False)
        load_actor_critic(actor_critic, args.load_dir, agent, args.load_step)
        agents.append(actor_critic)
    env.seed(np.random.randint(10000))
    experiences = sample_games(env, agents, 1, args)

    # score = np.zeros(9)
    # weights = np.zeros(9)
    # for agent_id in range(env.num_agents):
    #     exp = experiences[agent_id]
    #     n_exp = len(exp["obs"])
    #     print(n_exp)
    #     for i_exp in range(n_exp):
    #         for j_exp in range(n_exp):
    #             for i in range(9):
    #                 w = (exp["obs"][i_exp][i] - exp["obs"][j_exp][i]) ** 2
    #                 d = (exp["strategy"][i_exp] - exp["strategy"][j_exp]).square().sum()
    #                 score[i] += d * w
    #                 weights[i] += w
    # print(score / weights)

    # obss = [[0.1, 0., 1., 4., 2., 0., 0., 0., -1.],
    #         [0.1, 0., 1., 4., 2., 0., 0., 0., 1.],
    #         [0.1, 0., 1., 4., 2., 0., 0., -1., 0.],
    #         [0.1, 0., 1., 4., 2., 0., 0., 1., 0.]]
    # obss = [[0.1, 0., 0., 4., 2., 0., 0., 1., -1.],   # [0, 3]
    #         [0.1, 0., 0., 4., 2., 0., 0., 1., 1.],    # [1, 3]
    #         [0.1, 0., 0., 4., 2., 0., 0., -1., 1.],   # [1, 2]
    #         [0.1, 0., 0., 4., 2., 0., 0., -1., -1.]]  # [0, 2]
    obss = [[0.1, 0., 0., 4., 2., 0., 0., 0., -1.],
            [0.1, 0., 0., 4., 2., 0., 0., 0., 1.],
            [0.1, 0., 0., 4., 2., 0., 0., -1., 0.],
            [0.1, 0., 0., 4., 2., 0., 0., 1., 0.]]

    while True:
        obs = input("obs: ")
        if not obs.strip():
            break  # empty input ends the interactive probe so the gradient check below runs
        agent = 0
        obs = torch.tensor(list(map(float, obs.split())))
        strategy = agents[agent].get_strategy(obs, None, None).detach()
        print(strategy)
        print(-torch.log(strategy))
        for i in range(env.action_spaces[agent].n):
            action = torch.LongTensor([i])
            prediction = agents[agent].get_reward_prediction(
                obs, None, None, action)
            reward_prediction, random_net_value, random_net_prediction = prediction
            # print(reward_prediction.item(), random_net_value.item(),
            #       random_net_prediction.item(),
            #       (random_net_value - random_net_prediction).item())
            print(reward_prediction.item() / args.reward_prediction_multiplier)

    # obss = experiences[0]["obs"]
    n_obs = len(obss)
    obs = torch.tensor(obss, requires_grad=True)
    strategy = agents[0].get_strategy(obs, None, None)
    grads = np.zeros((9, ))
    for i in range(n_obs):
        _grads = np.zeros((9, ))
        for j in range(5):
            grad = torch.autograd.grad(
                strategy[i, j], obs,
                retain_graph=True)[0][i].detach().abs().numpy()
            # print(grad.shape)
            _grads += grad
        grads += _grads
    print(-torch.log(strategy.detach()))
def main():
    args = get_args()

    torch.manual_seed(args.seed)
    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")

    actor_critic = Policy(STATE_DIM, ACTION_DIM, USER_DIM)
    actor_critic.to(device)

    agent = algo.PPO(actor_critic,
                     args.clip_param,
                     args.ppo_epoch,
                     args.num_mini_batch,
                     args.value_loss_coef,
                     args.entropy_coef,
                     lr=args.lr,
                     eps=args.eps,
                     max_grad_norm=args.max_grad_norm)

    if args.cgail:
        # the non-cgail discriminator setup is assumed to happen elsewhere
        discr = cgail.Discriminator(STATE_DIM, ACTION_DIM, USER_DIM, device,
                                    lr=args.D_lr)

    train_file_name = os.path.join(args.experts_dir, "expert_traj.pkl")
    test_file_name = os.path.join(args.experts_dir, "test_traj.pkl")
    ground_file_name = os.path.join(args.experts_dir, "exp_loc.pkl")

    expert_st, expert_ur, expert_ac = pickle.load(open(train_file_name, 'rb'))
    train_load = data_utils.TensorDataset(
        torch.from_numpy(np.asarray(expert_st)),
        torch.from_numpy(np.asarray(expert_ur)),
        torch.from_numpy(np.asarray(expert_ac)))
    gail_train_loader = torch.utils.data.DataLoader(
        train_load, batch_size=args.gail_batch_size, shuffle=True)

    test_st, test_ur, test_ac = pickle.load(open(test_file_name, 'rb'))
    test_load = data_utils.TensorDataset(
        torch.from_numpy(np.asarray(test_st)),
        torch.from_numpy(np.asarray(test_ur)),
        torch.from_numpy(np.asarray(test_ac)))
    test_loader = torch.utils.data.DataLoader(test_load,
                                              batch_size=args.gail_batch_size,
                                              shuffle=True)

    exp_loc = pickle.load(open(ground_file_name, 'rb'))

    envs = make_vec_envs(expert_st, expert_ur, args.seed, args.num_processes,
                         args.gamma, device)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              STATE_DIM * 5, USER_DIM, ACTION_DIM)

    obs, user = envs.reset()
    rollouts.obs[0].copy_(obs[0])
    rollouts.user[0].copy_(user[0])
    rollouts.to(device)

    result_log = []
    start = time.time()
    num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes
    for j in range(num_updates):

        if args.use_linear_lr_decay:
            # decrease learning rate linearly
            utils.update_linear_schedule(agent.optimizer, j, num_updates,
                                         args.lr)

        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob = actor_critic.act(
                    rollouts.obs[step], rollouts.user[step])

            # Observe reward and next obs
            if action.item() != 9:
                obs = decide_next_state(action, rollouts.obs[step][0], 1)
            if obs is not None:
                rollouts.insert(obs, rollouts.user[step], action,
                                action_log_prob, value)

        with torch.no_grad():
            next_value = actor_critic.get_value(rollouts.obs[-1],
                                                rollouts.user[-1]).detach()

        gail_epoch = args.gail_epoch
        if j < 10:
            gail_epoch = 100  # Warm up
        for _ in range(gail_epoch):
            discr.update(gail_train_loader, rollouts)

        for step in range(args.num_steps):
            if args.cgail:
                rollouts.rewards[step] = discr.predict_reward(
                    rollouts.obs[step], rollouts.user[step],
                    rollouts.actions[step], args.gamma)
            else:
                rollouts.rewards[step] = discr.predict_reward(
                    rollouts.obs[step], rollouts.actions[step], args.gamma)

        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.gae_lambda)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)

        rollouts.after_update()

        # save for every interval-th episode or for the last epoch
        if (j % args.save_interval == 0
                or j == num_updates - 1) and args.save_dir != "":
            save_path = os.path.join(args.save_dir, str(args.lr),
                                     str(args.gail_batch_size),
                                     "entropy_" + str(args.entropy_coef),
                                     "D_lr" + str(args.D_lr))
            try:
                os.makedirs(save_path)
            except OSError:
                pass
            torch.save(actor_critic,
                       os.path.join(save_path, "ac_{}.pt".format(j)))
            torch.save(discr, os.path.join(save_path, "D_{}.pt".format(j)))

        if j % args.log_interval == 0:
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            end = time.time()
            print("Updates {}, num timesteps {}, FPS {}".format(
                j, total_num_steps, int(total_num_steps / (end - start))))

            out_loc = {}
            for data in test_loader:
                inputs, user, labels = data
                inputs = inputs.float()
                user = user.float()
                labels = labels.long()
                output = actor_critic.act(inputs, user)[1].tolist()
                for i in range(inputs.size(0)):
                    x = int(inputs[i][0].item())
                    y = int(inputs[i][1].item())
                    if (x, y) not in out_loc:
                        out_loc[(x, y)] = np.zeros(10)
                    out_loc[(x, y)][output[i]] += 1

            target = []
            ground = []
            for key in out_loc:
                o1 = out_loc[key].copy()
                o1 /= sum(o1)
                if key in exp_loc:
                    o2 = np.zeros(10)
                    for b, w in exp_loc[key].items():
                        o2[b] += w
                    o2 /= sum(o2)
                    target.append(o1)
                    ground.append(o2)
            k, kls = cross_entropy(target, ground)
            print(k)
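# cross_entropy(target, ground) is defined elsewhere; the evaluation above
# only needs a scalar divergence between predicted and expert action
# histograms per location. A hypothetical stand-in with the same shape:
def cross_entropy(target, ground, eps=1e-8):
    """Return mean KL divergence plus the per-location values (illustrative)."""
    target = np.asarray(target)  # [n_locations, n_actions]
    ground = np.asarray(ground)
    kls = np.sum(ground * (np.log(ground + eps) - np.log(target + eps)), axis=1)
    return float(np.mean(kls)), kls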
def main():
    all_episode_rewards = []  ### record (6/29)
    all_temp_rewards = []  ### record (6/29)
    args = get_args()

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    log_dir = os.path.expanduser(args.log_dir)
    eval_log_dir = log_dir + "_eval"
    utils.cleanup_log_dir(log_dir)
    utils.cleanup_log_dir(eval_log_dir)

    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")

    envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
                         args.gamma, args.log_dir, device, False)

    actor_critic = Policy(envs.observation_space.shape,
                          envs.action_space,
                          base_kwargs={'recurrent': args.recurrent_policy})
    actor_critic.to(device)

    if args.algo == 'a2c':
        agent = algo.A2C_ACKTR(actor_critic,
                               args.value_loss_coef,
                               args.entropy_coef,
                               lr=args.lr,
                               eps=args.eps,
                               alpha=args.alpha,
                               max_grad_norm=args.max_grad_norm)
    elif args.algo == 'ppo':
        agent = algo.PPO(actor_critic,
                         args.clip_param,
                         args.ppo_epoch,
                         args.num_mini_batch,
                         args.value_loss_coef,
                         args.entropy_coef,
                         lr=args.lr,
                         eps=args.eps,
                         max_grad_norm=args.max_grad_norm)
    elif args.algo == 'acktr':
        agent = algo.A2C_ACKTR(actor_critic,
                               args.value_loss_coef,
                               args.entropy_coef,
                               acktr=True)

    if args.gail:
        assert len(envs.observation_space.shape) == 1
        discr = gail.Discriminator(
            envs.observation_space.shape[0] + envs.action_space.shape[0], 100,
            device)
        file_name = os.path.join(
            args.gail_experts_dir,
            "trajs_{}.pt".format(args.env_name.split('-')[0].lower()))
        gail_train_loader = torch.utils.data.DataLoader(
            gail.ExpertDataset(file_name,
                               num_trajectories=4,
                               subsample_frequency=20),
            batch_size=args.gail_batch_size,
            shuffle=True,
            drop_last=True)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape, envs.action_space,
                              actor_critic.recurrent_hidden_state_size)

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    episode_rewards = deque(maxlen=10)

    start = time.time()
    num_updates = int(
        args.num_env_steps) // args.num_steps // args.num_processes
    print('num_updates ', num_updates)
    print('num_steps ', args.num_steps)

    count = 0
    h5_path = './data/' + args.env_name
    if not os.path.exists(h5_path):
        os.makedirs(h5_path)
    h5_filename = h5_path + '/trajs_' + args.env_name + '_%05d.h5' % (count)

    data = {}
    data['states'] = []
    data['actions'] = []
    data['rewards'] = []
    data['done'] = []
    data['lengths'] = []

    episode_step = 0

    for j in range(num_updates):  ### num-steps
        temp_states = []
        temp_actions = []
        temp_rewards = []
        temp_done = []
        temp_lengths = []

        if args.use_linear_lr_decay:
            # decrease learning rate linearly
            utils.update_linear_schedule(
                agent.optimizer, j, num_updates,
                agent.optimizer.lr if args.algo == "acktr" else args.lr)

        for step in range(args.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step], rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])

            if j == 0 and step == 0:
                print('obs ', type(rollouts.obs[step]),
                      rollouts.obs[step].shape)
                print('hidden_states ',
                      type(rollouts.recurrent_hidden_states[step]),
                      rollouts.recurrent_hidden_states[step].shape)
                print('action ', type(action), action.shape)
                print('action prob ', type(action_log_prob),
                      action_log_prob.shape)
                print('-' * 20)

            # Observe reward and next obs
            obs, reward, done, infos = envs.step(action)
            # print(infos)
            # print(reward)
            temp_states += [np.array(rollouts.obs[step].cpu())]
            temp_actions += [np.array(action.cpu())]
            # temp_rewards += [np.array(reward.cpu())]
            ### for halfcheetah, `reward` cannot be used directly!! (6/29)
            temp_rewards += [np.array([infos[0]['myrewards']])]
            temp_done += [np.array(done)]

            if j == 0 and step == 0:
                print('obs ', type(obs), obs.shape)
                print('reward ', type(reward), reward.shape)
                print('done ', type(done), done.shape)
                print('infos ', len(infos))
                for k, v in infos[0].items():
                    print(k, v.shape)
                print()

            for info in infos:
                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])
                    all_episode_rewards += [info['episode']['r']]  ### record (6/29)

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            bad_masks = torch.FloatTensor(
                [[0.0] if 'bad_transition' in info.keys() else [1.0]
                 for info in infos])
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, reward, masks, bad_masks)

        temp_lengths = len(temp_states)
        temp_states = np.concatenate(temp_states)
        temp_actions = np.concatenate(temp_actions)
        temp_rewards = np.concatenate(temp_rewards)
        temp_done = np.concatenate(temp_done)
        # print('temp_lengths', temp_lengths)
        # print('temp_states', temp_states.shape)
        # print('temp_actions', temp_actions.shape)
        # print('temp_rewards', temp_rewards.shape)

        if j > int(0.4 * num_updates):
            data['states'] += [temp_states]
            data['actions'] += [temp_actions]
            data['rewards'] += [temp_rewards]
            data['lengths'] += [temp_lengths]
            data['done'] += [temp_done]

        if args.save_expert and len(data['states']) >= 100:
            with h5py.File(h5_filename, 'w') as f:
                f['states'] = np.array(data['states'])
                f['actions'] = np.array(data['actions'])
                f['rewards'] = np.array(data['rewards'])
                f['done'] = np.array(data['done'])
                f['lengths'] = np.array(data['lengths'])
            count += 1
            h5_filename = h5_path + '/trajs_' + args.env_name + '_%05d.h5' % (
                count)
            data['states'] = []
            data['actions'] = []
            data['rewards'] = []
            data['done'] = []
            data['lengths'] = []

        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1]).detach()

        if args.gail:
            if j >= 10:
                envs.venv.eval()

            gail_epoch = args.gail_epoch
            if j < 10:
                gail_epoch = 100  # Warm up
            for _ in range(gail_epoch):
                discr.update(gail_train_loader, rollouts,
                             utils.get_vec_normalize(envs)._obfilt)

            for step in range(args.num_steps):
                rollouts.rewards[step] = discr.predict_reward(
                    rollouts.obs[step], rollouts.actions[step], args.gamma,
                    rollouts.masks[step])

        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.gae_lambda, args.use_proper_time_limits)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)

        rollouts.after_update()

        # save for every interval-th episode or for the last epoch
        if (j % args.save_interval == 0
                or j == num_updates - 1) and args.save_dir != "":
            save_path = os.path.join(args.save_dir, args.algo)
            try:
                os.makedirs(save_path)
            except OSError:
                pass
            torch.save([
                actor_critic,
                getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
            ], os.path.join(save_path, args.env_name + "_%d.pt" % (args.seed)))

        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            end = time.time()
            print(
                "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: "
                "mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, "
                "dist entropy {:.2f}, value loss {:.2f}, action loss {:.2f}\n".format(
                    j, total_num_steps, int(total_num_steps / (end - start)),
                    len(episode_rewards), np.mean(episode_rewards),
                    np.median(episode_rewards), np.min(episode_rewards),
                    np.max(episode_rewards), dist_entropy, value_loss,
                    action_loss))
            # np.save(os.path.join(save_path, args.env_name + "_%d" % (args.seed)),
            #         all_episode_rewards)  ### save history (6/29)
            # print(temp_rewards)
            print("temp rewards size", temp_rewards.shape, "mean",
                  np.mean(temp_rewards), "min", np.min(temp_rewards), "max",
                  np.max(temp_rewards))
            all_temp_rewards += [temp_rewards]
            np.savez(os.path.join(save_path,
                                  args.env_name + "_%d" % (args.seed)),
                     episode=all_episode_rewards,
                     timestep=all_temp_rewards)

        if (args.eval_interval is not None and len(episode_rewards) > 1
                and j % args.eval_interval == 0):
            ob_rms = utils.get_vec_normalize(envs).ob_rms
            evaluate(actor_critic, ob_rms, args.env_name, args.seed,
                     args.num_processes, eval_log_dir, device)

    '''data['states'] = np.array(data['states'])
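# A minimal sketch of reading one of the trajectory archives written above,
# assuming only the keys this script stores ('states', 'actions', 'rewards',
# 'done', 'lengths'):
def load_traj_h5(h5_filename):
    with h5py.File(h5_filename, 'r') as f:
        return {k: np.array(f[k])
                for k in ('states', 'actions', 'rewards', 'done', 'lengths')}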
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") # coinrun environments need to be treated differently. coinrun_envs = { 'CoinRun': 'standard', 'CoinRun-Platforms': 'platform', 'Random-Mazes': 'maze' } envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False, coin_run_level=args.num_levels, difficulty=args.high_difficulty, coin_run_seed=args.seed) if args.env_name in coinrun_envs.keys(): observation_space_shape = (3, 64, 64) args.save_dir = args.save_dir + "/NUM_LEVELS_{}".format( args.num_levels) # Save the level info in the else: observation_space_shape = envs.observation_space.shape # trained model name if args.continue_ppo_training: actor_critic, _ = torch.load(os.path.join(args.check_point, args.env_name + ".pt"), map_location=torch.device(device)) elif args.cor_gail: embed_size = args.embed_size actor_critic = Policy(observation_space_shape, envs.action_space, hidden_size=args.hidden_size, embed_size=embed_size, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) correlator = Correlator(observation_space_shape, envs.action_space, hidden_dim=args.hidden_size, embed_dim=embed_size, lr=args.lr, device=device) correlator.to(device) embeds = torch.zeros(1, embed_size) else: embed_size = 0 actor_critic = Policy(observation_space_shape, envs.action_space, hidden_size=args.hidden_size, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) embeds = None if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, use_clipped_value_loss=True, ftrl_mode=args.cor_gail or args.no_regret_gail, correlated_mode=args.cor_gail) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail or args.no_regret_gail or args.cor_gail: file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format(args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset( file_name, num_trajectories=50, subsample_frequency=1) #if subsample set to a different number, # grad_pen might need adjustment drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) if args.gail: discr = gail.Discriminator(observation_space_shape, envs.action_space, device=device) if args.no_regret_gail or args.cor_gail: queue = deque( maxlen=args.queue_size ) # Strategy Queues: Each element of a queue is a dicr strategy agent_queue = deque( maxlen=args.queue_size ) # Strategy Queues: Each element of a queue is an agent strategy pruning_frequency = 1 if args.no_regret_gail: discr = regret_gail.NoRegretDiscriminator(observation_space_shape, envs.action_space, device=device) if args.cor_gail: discr = 
cor_gail.CorDiscriminator(observation_space_shape, envs.action_space, hidden_size=args.hidden_size, embed_size=embed_size, device=device) discr.to(device) rollouts = RolloutStorage(args.num_steps, args.num_processes, observation_space_shape, envs.action_space, actor_critic.recurrent_hidden_state_size, embed_size) obs = envs.reset() rollouts.obs[0].copy_(obs) if args.cor_gail: rollouts.embeds[0].copy_(embeds) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions # Roll-out with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step], rollouts.embeds[step]) obs, reward, done, infos = envs.step(action.to('cpu')) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) # Sample mediating/correlating actions # Correlated Roll-out if args.cor_gail: embeds, embeds_log_prob, mean = correlator.act( rollouts.obs[step], rollouts.actions[step]) rollouts.insert_embedding(embeds, embeds_log_prob) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1], rollouts.embeds[-1]).detach() if args.gail or args.no_regret_gail or args.cor_gail: if args.env_name not in {'CoinRun', 'Random-Mazes'}: if j >= 10: envs.venv.eval() gail_epoch = args.gail_epoch if args.gail: if j < 10: gail_epoch = 100 # Warm up # no need for gail epoch or warm up in the no-regret case and cor_gail. 
for _ in range(gail_epoch): if utils.get_vec_normalize(envs): obfilt = utils.get_vec_normalize(envs)._obfilt else: obfilt = None if args.gail: discr.update(gail_train_loader, rollouts, obfilt) if args.no_regret_gail or args.cor_gail: last_strategy = discr.update(gail_train_loader, rollouts, queue, args.max_grad_norm, obfilt, j) for step in range(args.num_steps): if args.gail: rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) if args.no_regret_gail: rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step], queue) if args.cor_gail: rollouts.rewards[ step], correlator_reward = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], rollouts.embeds[step], args.gamma, rollouts.masks[step], queue) rollouts.correlated_reward[step] = correlator_reward rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) if args.gail: value_loss, action_loss, dist_entropy = agent.update(rollouts, j) elif args.no_regret_gail or args.cor_gail: value_loss, action_loss, dist_entropy, agent_gains, agent_strategy = \ agent.mixed_update(rollouts, agent_queue, j) if args.cor_gail: correlator.update(rollouts, agent_gains, args.max_grad_norm) if args.no_regret_gail or args.cor_gail: queue, _ = utils.queue_update(queue, pruning_frequency, args.queue_size, j, last_strategy) agent_queue, pruning_frequency = utils.queue_update( agent_queue, pruning_frequency, args.queue_size, j, agent_strategy) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass if not args.cor_gail: torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) else: print("saving models in {}".format( os.path.join(save_path, args.env_name))) torch.save( correlator.state_dict(), os.path.join(save_path, args.env_name + "correlator.pt")) torch.save([ actor_critic.state_dict(), getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + "actor.pt")) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}," " value loss/action loss {:.1f}/{}".format( j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
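# utils.queue_update is defined elsewhere; judging only by its call sites
# above, it appends the newest strategy and returns the (possibly pruned)
# queue plus an updated pruning frequency. A hypothetical stand-in:
def queue_update(queue, pruning_frequency, queue_size, iteration, strategy):
    queue.append(strategy)  # a deque(maxlen=queue_size) drops the oldest entry
    # the pruning schedule is left unchanged in this sketch
    return queue, pruning_frequency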
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True args_dir, logs_dir, models_dir, samples_dir = get_all_save_paths( args, 'pretrain', combine_action=args.combine_action) eval_log_dir = logs_dir + "_eval" utils.cleanup_log_dir(logs_dir) utils.cleanup_log_dir(eval_log_dir) _, _, intrinsic_models_dir, _ = get_all_save_paths(args, 'learn_reward', load_only=True) if args.load_iter != 'final': intrinsic_model_file_name = os.path.join( intrinsic_models_dir, args.env_name + '_{}.pt'.format(args.load_iter)) else: intrinsic_model_file_name = os.path.join( intrinsic_models_dir, args.env_name + '.pt'.format(args.load_iter)) intrinsic_arg_file_name = os.path.join(args_dir, 'command.txt') # save args to arg_file with open(intrinsic_arg_file_name, 'w') as f: json.dump(args.__dict__, f, indent=2) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, logs_dir, device, False) actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) else: raise NotImplementedError if args.use_intrinsic: obs_shape = envs.observation_space.shape if len(obs_shape) == 3: action_dim = envs.action_space.n elif len(obs_shape) == 1: action_dim = envs.action_space.shape[0] if 'NoFrameskip' in args.env_name: file_name = os.path.join( args.experts_dir, "trajs_ppo_{}.pt".format( args.env_name.split('-')[0].replace('NoFrameskip', '').lower())) else: file_name = os.path.join( args.experts_dir, "trajs_ppo_{}.pt".format(args.env_name.split('-')[0].lower())) rff = RewardForwardFilter(args.gamma) intrinsic_rms = RunningMeanStd(shape=()) if args.intrinsic_module == 'icm': print('Loading pretrained intrinsic module: %s' % intrinsic_model_file_name) inverse_model, forward_dynamics_model, encoder = torch.load( intrinsic_model_file_name) icm = IntrinsicCuriosityModule(envs, device, inverse_model, forward_dynamics_model, \ inverse_lr=args.intrinsic_lr, forward_lr=args.intrinsic_lr,\ ) if args.intrinsic_module == 'vae': print('Loading pretrained intrinsic module: %s' % intrinsic_model_file_name) vae = torch.load(intrinsic_model_file_name) icm = GenerativeIntrinsicRewardModule(envs, device, \ vae, lr=args.intrinsic_lr, \ ) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" 
else args.lr) for step in range(args.num_steps): with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) obs, reward, done, infos = envs.step(action) next_obs = obs for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, next_obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() if args.use_intrinsic: for step in range(args.num_steps): state = rollouts.obs[step] action = rollouts.actions[step] next_state = rollouts.next_obs[step] if args.intrinsic_module == 'icm': state = encoder(state) next_state = encoder(next_state) with torch.no_grad(): rollouts.rewards[ step], pred_next_state = icm.calculate_intrinsic_reward( state, action, next_state, args.lambda_true_action) if args.standardize == 'True': buf_rews = rollouts.rewards.cpu().numpy() intrinsic_rffs = np.array( [rff.update(rew) for rew in buf_rews.T]) rffs_mean, rffs_std, rffs_count = mpi_moments( intrinsic_rffs.ravel()) intrinsic_rms.update_from_moments(rffs_mean, rffs_std**2, rffs_count) mean = intrinsic_rms.mean std = np.asarray(np.sqrt(intrinsic_rms.var)) rollouts.rewards = rollouts.rewards / torch.from_numpy(std).to( device) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(models_dir, args.algo) policy_file_name = os.path.join(save_path, args.env_name + '.pt') try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], policy_file_name) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "{} Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(args.env_name, j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
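# The standardize branch above divides intrinsic rewards by the standard
# deviation of a discounted running return. A minimal self-contained sketch of
# that filter (the RewardForwardFilter used above is assumed to behave like
# this, following the usual RND-style recipe):
import numpy as np

class DiscountedReturnFilter:
    def __init__(self, gamma):
        self.gamma = gamma
        self.ret = None  # one running return per parallel env

    def update(self, rews):
        self.ret = rews if self.ret is None else self.ret * self.gamma + rews
        return self.ret

filt = DiscountedReturnFilter(0.99)
rews = np.random.rand(128, 8)                       # [num_steps, num_envs]
returns = np.array([filt.update(r) for r in rews])
scaled = rews / (returns.std() + 1e-8)              # keeps reward scale stable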
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:" + str(args.cuda_id) if args.cuda else "cpu") envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False) actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: assert len(envs.observation_space.shape) == 1 discr = gail.Discriminator( envs.observation_space.shape[0] + envs.action_space.shape[0], 100, device) file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format(args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset(file_name, num_trajectories=4, subsample_frequency=20) drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) ########## file related filename = args.env_name + "_" + args.algo + "_n" + str(args.max_episodes) if args.attack: filename += "_" + args.type + "_" + args.aim filename += "_s" + str(args.stepsize) + "_m" + str( args.maxiter) + "_r" + str(args.radius) + "_f" + str(args.frac) if args.run >= 0: filename += "_run" + str(args.run) logger = get_log(args.logdir + filename + "_" + current_time) logger.info(args) rew_file = open(args.resdir + filename + ".txt", "w") if args.compute: radius_file = open( args.resdir + filename + "_radius" + "_s" + str(args.stepsize) + "_m" + str(args.maxiter) + "_th" + str(args.dist_thres) + ".txt", "w") if args.type == "targ" or args.type == "fgsm": targ_file = open(args.resdir + filename + "_targ.txt", "w") num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes if args.type == "wb": attack_net = WbAttacker(agent, envs, int(args.frac * num_updates), num_updates, args, device=device) if args.type == "bb": attack_net = BbAttacker(agent, envs, int(args.frac * num_updates), num_updates, args, device=device) elif args.type == "rand": attack_net = RandAttacker(envs, radius=args.radius, frac=args.frac, maxat=int(args.frac * num_updates), device=device) elif args.type == "semirand": attack_net = WbAttacker(agent, envs, int(args.frac * num_updates), num_updates, args, device, rand_select=True) elif args.type == "targ": if isinstance(envs.action_space, Discrete): action_dim = envs.action_space.n target_policy = action_dim - 1 elif isinstance(envs.action_space, Box): action_dim = envs.action_space.shape[0] target_policy = torch.zeros(action_dim) # target_policy[-1] = 1 print("target policy is", target_policy) attack_net = TargAttacker(agent, envs, int(args.frac * num_updates), 
num_updates, target_policy, args, device=device) elif args.type == "fgsm": if isinstance(envs.action_space, Discrete): action_dim = envs.action_space.n target_policy = action_dim - 1 elif isinstance(envs.action_space, Box): action_dim = envs.action_space.shape[0] target_policy = torch.zeros(action_dim) def targ_policy(obs): return target_policy attack_net = FGSMAttacker(envs, agent, targ_policy, radius=args.radius, frac=args.frac, maxat=int(args.frac * num_updates), device=device) # if args.aim == "obs" or aim == "hybrid": # obs_space = gym.make(args.env_name).observation_space # attack_net.set_obs_range(obs_space.low, obs_space.high) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=10) episode = 0 start = time.time() for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions if args.type == "fgsm": # print("before", rollouts.obs[step]) rollouts.obs[step] = attack_net.attack( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]).clone() # print("after", rollouts.obs[step]) with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) if args.type == "targ" or args.type == "fgsm": if isinstance(envs.action_space, Discrete): num_target = ( action == target_policy).nonzero()[:, 0].size()[0] targ_file.write( str(num_target / args.num_processes) + "\n") print("percentage of target:", num_target / args.num_processes) elif isinstance(envs.action_space, Box): target_action = target_policy.repeat(action.size()[0], 1) targ_file.write( str( torch.norm(action - target_action).item() / args.num_processes) + "\n") print("percentage of target:", torch.sum(action).item() / args.num_processes) # Obser reward and next obs obs, reward, done, infos = envs.step(action.cpu()) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # rew_file.write("episode: {}, total reward: {}\n".format(episode, info['episode']['r'])) episode += 1 # If done then clean the history of observations. 
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() if args.gail: if j >= 10: envs.venv.eval() gail_epoch = args.gail_epoch if j < 10: gail_epoch = 100 # Warm up for _ in range(gail_epoch): discr.update(gail_train_loader, rollouts, utils.get_vec_normalize(envs)._obfilt) for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) if args.attack and args.type != "fgsm": if args.aim == "reward": logger.info(rollouts.rewards.flatten()) rollouts.rewards = attack_net.attack_r_general( rollouts, next_value).clone().detach() logger.info("after attack") logger.info(rollouts.rewards.flatten()) elif args.aim == "obs": origin = rollouts.obs.clone() rollouts.obs = attack_net.attack_s_general( rollouts, next_value).clone().detach() logger.info(origin) logger.info("after") logger.info(rollouts.obs) elif args.aim == "action": origin = torch.flatten(rollouts.actions).clone() rollouts.actions = attack_net.attack_a_general( rollouts, next_value).clone().detach() logger.info("attack value") logger.info(torch.flatten(rollouts.actions) - origin) elif args.aim == "hybrid": res_aim, attack = attack_net.attack_hybrid( rollouts, next_value, args.radius_s, args.radius_a, args.radius_r) print("attack ", res_aim) if res_aim == "obs": origin = rollouts.obs.clone() rollouts.obs = attack.clone().detach() logger.info(origin) logger.info("attack obs") logger.info(rollouts.obs) elif res_aim == "action": origin = torch.flatten(rollouts.actions).clone() rollouts.actions = attack.clone().detach() logger.info("attack action") logger.info(torch.flatten(rollouts.actions) - origin) elif res_aim == "reward": logger.info(rollouts.rewards.flatten()) rollouts.rewards = attack.clone().detach() logger.info("attack reward") logger.info(rollouts.rewards.flatten()) if args.compute: stable_radius = attack_net.compute_radius(rollouts, next_value) print("stable radius:", stable_radius) radius_file.write("update: {}, radius: {}\n".format( j, np.round(stable_radius, decimals=3))) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) if args.attack and args.type == "bb": attack_net.learning(rollouts) value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) if j % args.log_interval == 0 and len(episode_rewards) >= 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), 
                    dist_entropy, value_loss, action_loss))
            rew_file.write("updates: {}, mean reward: {}\n".format(
                j, np.mean(episode_rewards)))

        if (args.eval_interval is not None and len(episode_rewards) > 1
                and j % args.eval_interval == 0):
            ob_rms = utils.get_vec_normalize(envs).ob_rms
            evaluate(actor_critic, ob_rms, args.env_name, args.seed,
                     args.num_processes, eval_log_dir, device)

        # if episode > args.max_episodes:
        #     print("reach episodes limit")
        #     break

    if args.attack:
        logger.info("total attacks: {}\n".format(attack_net.attack_num))
        print("total attacks: {}\n".format(attack_net.attack_num))
    rew_file.close()
    if args.compute:
        radius_file.close()
    if args.type == "targ" or args.type == "fgsm":
        targ_file.close()
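# The "fgsm" attack above perturbs stored observations before the policy acts.
# A minimal sketch of the underlying fast-gradient-sign step, assuming some
# differentiable attacker loss over observations (the real FGSMAttacker's
# loss and interface are not shown here):
import torch

def fgsm_step(obs, loss_fn, radius):
    obs = obs.clone().detach().requires_grad_(True)
    loss_fn(obs).backward()                            # attacker objective
    return (obs + radius * obs.grad.sign()).detach()   # bounded one-step move

# Example with a toy objective: push observations toward larger norm.
perturbed = fgsm_step(torch.randn(4, 8), lambda o: (o ** 2).sum(), 0.05)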
def main(): args = get_args() # set seeds and devices np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") log_dir = utils.default_log_init(args.log_dir, args.env_name) save_dir = utils.default_save_init(log_dir, args.save_dir) args_file = utils.default_args_init(log_dir, args) threads_dir = log_dir + "threads/" os.makedirs(threads_dir) logger.configure(log_dir) print(log_dir) eval_log_dir = log_dir + "_eval" envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, threads_dir, device, False) action_sample = envs.action_space.sample() def init_alg(): actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={ 'recurrent': args.recurrent_policy }).to(device) agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, weight_decay=args.weight_decay) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) return actor_critic, agent, rollouts actor_critic, agent, rollouts = init_alg() print(actor_critic) print(actor_critic.num_params) # init observations and rollouts obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) # init train loggers(not useful for the actual training, but for analysis) episode_rewards = deque(maxlen=args.average_over) start_time = time.time() abs_start = start_time min_rewards = [] max_rewards = [] mean_rewards = [] median_rewards = [] nr_episodes = [] times = [] num_total_steps = [] log_dict = { "min_rewards": min_rewards, "max_rewards": max_rewards, "mean_rewards": mean_rewards, "median_rewards": median_rewards, "nr_episodes": nr_episodes, "times": times, "num_total_steps": num_total_steps } # init convergence checks and other useful variables best_avg = -1e6 best_med = -1e6 since_improve = 0 solved = 0 epochs = int(args.num_env_steps) // args.num_steps // args.num_processes for j in range(1, epochs + 1): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, epochs, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Obser reward and next obs if type(action_sample) is int: obs, reward, done, infos = envs.step(action.squeeze()) else: obs, reward, done, infos = envs.step(action) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. 
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end_time = time.time() s_total = end_time - abs_start print( "Updates(epochs) {}, num timesteps {}, elapsed {:01}:{:02}:{:02.2f} epoch seconds {} \n Last {} training episodes: " "mean/median reward {:.1f}/{:.1f},min/max reward {:.1f}/{:.1f}\n " .format(j, total_num_steps, int(s_total // 3600), int(s_total % 3600 // 60), s_total % 60, end_time - start_time, len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss), flush=True) min_rewards.append(np.min(episode_rewards)) max_rewards.append(np.max(episode_rewards)) mean_rewards.append(np.mean(episode_rewards)) median_rewards.append(np.median(episode_rewards)) nr_episodes.append(total_num_steps) times.append(end_time - start_time) num_total_steps.append(total_num_steps) start_time = end_time if (j % args.save_interval == 0 or j == epochs - 1) and save_dir != "": save_path = "{}it{}_val{:.1f}.pth".format(save_dir, j, np.mean(episode_rewards)) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], save_path) print("-------Saved at path {}-------\n".format(save_path)) with open(save_dir + "it_{}_log.json".format(j), "w") as file: json.dump(log_dict, file) if args.convergence_its != 0: worse = True if best_avg < np.mean(episode_rewards): best_avg = np.mean(episode_rewards) since_improve = 0 worse = False if best_med < np.median(episode_rewards): best_med = np.median(episode_rewards) since_improve = 0 worse = False if worse: since_improve += 1 if since_improve > args.convergence_its: print( "No improvements in {} iterations, best average is {}, best median is {}, stopping training" .format(since_improve, best_avg, best_med)) save_path = "{}it{}_val{:.1f}_c.pth".format( save_dir, j, np.mean(episode_rewards)) print("Saved final model at {}".format(save_path)) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], save_path) return if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
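# Several of these training loops call utils.update_linear_schedule. For
# reference, the conventional implementation in this family of codebases
# decays the learning rate linearly to zero over training:
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr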
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir + args.env_name) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) log_dir2 = os.path.expanduser(args.log_dir2 + args.env_name2) eval_log_dir2 = log_dir + "_eval" utils.cleanup_log_dir(log_dir2) utils.cleanup_log_dir(eval_log_dir2) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") import json file_path = "config.json" setup_json = json.load(open(file_path, 'r')) env_conf = setup_json["Default"] for i in setup_json.keys(): if i in args.env_name: env_conf = setup_json[i] # 1 game envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, env_conf, False) # 2 game envs2 = make_vec_envs(args.env_name2, args.seed, args.num_processes, args.gamma, args.log_dir2, device, env_conf, False) save_model, ob_rms = torch.load('./trained_models/PongNoFrameskip-v4.pt') from a2c_ppo_acktr.cnn import CNNBase a = CNNBase(envs.observation_space.shape[0], recurrent=False) actor_critic = Policy( envs.observation_space.shape, envs.action_space, #(obs_shape[0], ** base_kwargs) base=a, #base_kwargs={'recurrent': args.recurrent_policy} ) #actor_critic.load_state_dict(save_model.state_dict()) actor_critic.to(device) actor_critic2 = Policy(envs2.observation_space.shape, envs2.action_space, base=a) #base_kwargs={'recurrent': args.recurrent_policy}) #actor_critic2.load_state_dict(save_model.state_dict()) actor_critic2.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, actor_critic2, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) rollouts2 = RolloutStorage(args.num_steps, args.num_processes, envs2.observation_space.shape, envs2.action_space, actor_critic2.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) obs2 = envs2.reset() rollouts2.obs[0].copy_(obs2) rollouts2.to(device) episode_rewards = deque(maxlen=10) episode_rewards2 = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes for j in range(num_updates): # if args.use_linear_lr_decay: # # decrease learning rate linearly # utils.update_linear_schedule( # agent.optimizer, j, num_updates, # agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states, _ = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) value2, action2, action_log_prob2, recurrent_hidden_states2, _ = actor_critic2.act( rollouts2.obs[step], rollouts2.recurrent_hidden_states[step], rollouts2.masks[step]) # Obser reward and next obs obs, reward, done, infos = envs.step(action) obs2, reward2, done2, infos2 = envs2.step(action2) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) for info2 in infos2: if 'episode' in info2.keys(): episode_rewards2.append(info2['episode']['r']) # If done then clean the history of observations. 
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) masks2 = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done2]) bad_masks2 = torch.FloatTensor( [[0.0] if 'bad_transition' in info2.keys() else [1.0] for info2 in infos2]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) rollouts2.insert(obs2, recurrent_hidden_states2, action2, action_log_prob2, value2, reward2, masks2, bad_masks2) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() next_value2 = actor_critic2.get_value( rollouts2.obs[-1], rollouts2.recurrent_hidden_states[-1], rollouts2.masks[-1]).detach() rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) rollouts2.compute_returns(next_value2, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy, value_loss2, action_loss2, dist_entropy2 = agent.update( rollouts, rollouts2) rollouts.after_update() rollouts2.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) torch.save([ actor_critic2, getattr(utils.get_vec_normalize(envs2), 'ob_rms2', None) ], os.path.join(save_path, args.env_name2 + ".pt")) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards2), np.mean(episode_rewards2), np.median(episode_rewards2), np.min(episode_rewards2), np.max(episode_rewards2), dist_entropy2, value_loss2, action_loss2)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device) ob_rms2 = utils.get_vec_normalize(envs2).ob_rms evaluate(actor_critic2, ob_rms2, args.env_name2, args.seed, args.num_processes, eval_log_dir2, device)
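# Every loop above builds masks (0 at episode ends) and bad_masks (0 at
# time-limit terminations). A sketch of how compute_returns typically consumes
# them with GAE and use_proper_time_limits, assuming value tensors carry one
# extra bootstrap entry, i.e. shapes [num_steps(+1), num_processes, 1]:
import torch

def gae_returns(rewards, values, masks, bad_masks, gamma, lam):
    returns = torch.zeros_like(rewards)
    gae = 0
    for t in reversed(range(rewards.size(0))):
        delta = rewards[t] + gamma * values[t + 1] * masks[t + 1] - values[t]
        gae = delta + gamma * lam * masks[t + 1] * gae
        gae = gae * bad_masks[t + 1]   # discard credit across time limits
        returns[t] = gae + values[t]
    return returns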
def main():
    args = get_args()
    run(args)
def save_traj_noise(): args = get_args() method_type = "RL" method_name = args.algo.upper() hypers = "ec%0.5f" % args.entropy_coef exp_name = "%s-%s_s%d" % (method_name, hypers, 1) model_name = "%s-%s" % (args.env_name, exp_name) save_path = os.path.join(args.save_dir, args.algo.upper(), args.env_name) demo_file_size = 10000 clipob = 10 #default in baselines code epsilon = 1e-8 max_step = 1000 traj_deterministic = args.traj_deterministic # 1.0, 0.4, 0.3, 0.2, 0.1, 0.0 # choose one from this. Looping through the list makes rng not consistent. perf_list = [1.0] for perf in perf_list: torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True np.random.seed(args.seed) random.seed(args.seed) env = make_vec_envs(args.env_name, args.seed + 1000, 1, None, None, device='cpu', allow_early_resets=False) state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] print("State dim: %d, action dim: %d" % (state_dim, action_dim)) a_high = np.asscalar(env.action_space.high[0]) a_low = np.asscalar(env.action_space.low[0]) if args.env_name == "AntBulletEnv-v0": load_step = 10000000 if perf == 1.0: noise_level = 0 # 3500 if perf == 0.4: noise_level = 1.0 # 1500 if perf == 0.3: noise_level = 1.2 # 1000 if perf == 0.2: noise_level = 1.3 # 800 if perf == 0.1: noise_level = 1.4 # 500 if perf == 0.0: noise_level = 1.5 # 400 else: raise NotImplementedError # We need to use the same statistics for normalization as used in training actor_critic, ob_rms = \ torch.load( os.path.join(save_path, model_name + ("T%d.pt" % load_step))) vec_norm = get_vec_normalize(env) if vec_norm is not None: vec_norm.eval() vec_norm.ob_rms = None # ob_rms will be used manually in order to save unnormalize demonstrations. obs_list = [] act_lst = [] nobs_list = [] mask_list = [] reward_list = [] total_step = 0 avg_reward_episode = 0 print(model_name + ("T%d.pt" % load_step)) for i_episode in count(): obs = env.reset() sum_rewards = 0 recurrent_hidden_states = torch.zeros( 1, actor_critic.recurrent_hidden_state_size) masks = torch.zeros(1, 1) t = 0 while True: ## normalize obs used by trained models obs_normalize = np.clip((obs.numpy() - ob_rms.mean) / np.sqrt(ob_rms.var + epsilon), -clipob, clipob) obs_normalize = torch.from_numpy(obs_normalize).float() with torch.no_grad(): value, action, _, recurrent_hidden_states = actor_critic.act( obs_normalize, recurrent_hidden_states, masks, deterministic=traj_deterministic) ## add noise if noise_level > 0: # action = action + torch.FloatTensor(np.random.normal(0, noise_level, (1, action_dim))) action = action + torch.normal( mean=0, std=noise_level, size=action.size()) # Observe reward and next obs (unnormalized) next_obs, reward, done, infos = env.step(action) sum_rewards += reward masks.fill_(0.0 if done else 1.0) obs_list.append(obs.numpy()) act_lst.append(action.numpy()) nobs_list.append(next_obs.numpy()) mask_list.append(int(not done)) reward_list.append(reward.numpy()) obs = next_obs total_step += 1 t += 1 if done: print("Episode %2d: Sum rewards %0.2f, Steps %d" % (i_episode, sum_rewards, t)) break avg_reward_episode += sum_rewards # if i_episode % 10 == 0: # print('Episode %2d reward: %.2f' % (i_episode, sum_rewards)) if total_step >= demo_file_size: break """ save data """ obs_array = np.vstack(obs_list) act_array = np.vstack(act_lst) nobs_array = np.vstack(nobs_list) mask_array = np.vstack(mask_list) reward_array = np.vstack(reward_list) print("Total steps %d, total episode %d, AVG reward: %f" % (total_step, 
              i_episode + 1, avg_reward_episode / (i_episode + 1)))

        traj_path = "./imitation_data/%s" % args.env_name  # no trailing slash; joined below
        pathlib.Path(traj_path).mkdir(parents=True, exist_ok=True)
        traj_filename = traj_path + ("/%s_TRAJ-N%d_A%0.1f" %
                                     (args.env_name, demo_file_size, perf))
        if traj_deterministic:
            traj_filename += "_det"
        else:
            traj_filename += "_sto"

        hf = h5py.File(traj_filename + ".h5", 'w')
        hf.create_dataset('model_file', data=model_name + ("T%d.pt" % load_step))
        hf.create_dataset('obs_array', data=obs_array)
        hf.create_dataset('act_array', data=act_array)
        hf.create_dataset('nobs_array', data=nobs_array)
        hf.create_dataset('mask_array', data=mask_array)
        hf.create_dataset('reward_array', data=reward_array)
        hf.create_dataset('obs_rms_mean', data=ob_rms.mean)
        hf.create_dataset('obs_rms_var', data=ob_rms.var)
        hf.close()  # close the file so all datasets are flushed to disk
        print("TRAJs are saved as %s" % traj_filename)
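# A minimal sketch of reading one of the saved demonstration files back
# (the path below is hypothetical; the layout matches the datasets written
# above). Observations are stored unnormalized, so the saved ob_rms statistics
# must be re-applied before feeding a trained policy:
import h5py
import numpy as np

with h5py.File("imitation_data/AntBulletEnv-v0/AntBulletEnv-v0_TRAJ-N10000_A1.0_det.h5",
               "r") as hf:
    obs = hf["obs_array"][()]
    mean, var = hf["obs_rms_mean"][()], hf["obs_rms_var"][()]
obs_normalized = np.clip((obs - mean) / np.sqrt(var + 1e-8), -10, 10)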
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") receipts = StorageReceipt() make_env = lambda tasks: MiniWoBGraphEnvironment( base_url=os.environ.get("BASE_URL", f"file://{MINIWOB_HTML}/"), levels=tasks, level_tracker=LevelTracker(tasks), wait_ms=500, ) task = args.env_name if args.env_name == "PongNoFrameskip-v4": args.env_name = "clickbutton" task = "miniwob/click-button.html" if task == "levels": tasks = MINIWOB_CHALLENGES else: tasks = [[task]] print("Selected tasks:", tasks) NUM_ACTIONS = 1 envs = make_vec_envs( [make_env(tasks[i % len(tasks)]) for i in range(args.num_processes)], receipts) if os.path.exists("./datadir/autoencoder.pt"): dom_autoencoder = torch.load("./datadir/autoencoder.pt") dom_encoder = dom_autoencoder.encoder for param in dom_encoder.parameters(): param.requires_grad = False else: print("No dom encoder") dom_encoder = None actor_critic = Policy( envs.observation_space.shape, gym.spaces.Discrete(NUM_ACTIONS), # envs.action_space, base=GNNBase, base_kwargs={ "dom_encoder": dom_encoder, "recurrent": args.recurrent_policy }, ) actor_critic.dist = NodeObjective() actor_critic.to(device) if args.algo == "a2c": agent = algo.A2C_ACKTR( actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm, ) elif args.algo == "ppo": agent = algo.PPO( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, ) elif args.algo == "acktr": agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: assert len(envs.observation_space.shape) == 1 discr = gail.Discriminator(envs.observation_space.shape[0], 100, device) rr = ReplayRepository("/code/miniwob-plusplus-demos/*turk/*") ds = rr.get_dataset() print("GAIL Replay Dataset", ds) gail_train_loader = torch_geometric.data.DataLoader( ds, batch_size=args.gail_batch_size, shuffle=True, drop_last=True) from tensorboardX import SummaryWriter import datetime ts_str = datetime.datetime.fromtimestamp( time.time()).strftime("%Y-%m-%d_%H-%M-%S") tensorboard_writer = SummaryWriter( log_dir=os.path.join("/tmp/log", ts_str)) rollouts = ReceiptRolloutStorage( args.num_steps, args.num_processes, (1, ), # envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size, receipts, ) # resume from last save if args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass model_path = os.path.join(save_path, args.env_name + ".pt") if False and os.path.exists(model_path): print("Loadng previous model:", model_path) actor_critic = torch.load(model_path) actor_critic.train() obs = envs.reset() rollouts.obs[0].copy_(torch.tensor(obs)) rollouts.to(device) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes print("Iterations:", num_updates, args.num_steps) for j in range(num_updates): episode_rewards = deque(maxlen=args.num_steps * args.num_processes) if j and last_action_time + 5 < 
time.time(): # task likely timed out print("Reseting tasks") obs = envs.reset() rollouts.obs[0].copy_(torch.tensor(obs)) rollouts.recurrent_hidden_states[0].copy_( torch.zeros_like(rollouts.recurrent_hidden_states[0])) rollouts.masks[0].copy_(torch.zeros_like(rollouts.masks[0])) if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr, ) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( receipts.redeem(rollouts.obs[step]), rollouts.recurrent_hidden_states[step], rollouts.masks[step], ) # Obser reward and next obs last_action_time = time.time() obs, reward, done, infos = envs.step(action) for e, i in enumerate(infos): if i.get("real_action") is not None: action[e] = i["real_action"] if i.get("bad_transition"): action[e] = torch.zeros_like(action[e]) for info in infos: if "episode" in info.keys(): episode_rewards.append(info["episode"]["r"]) # If done then clean the history of observations. masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if "bad_transition" in info.keys() else [1.0] for info in infos]) rollouts.insert( torch.tensor(obs), recurrent_hidden_states, action, action_log_prob, value, torch.tensor(reward).unsqueeze(1), masks, bad_masks, ) with torch.no_grad(): next_value = actor_critic.get_value( receipts.redeem(rollouts.obs[-1]), rollouts.recurrent_hidden_states[-1], rollouts.masks[-1], ).detach() if args.gail: # if j >= 10: # envs.venv.eval() gail_epoch = args.gail_epoch if j < 10: gail_epoch = 100 # Warm up for _ in range(gail_epoch): obsfilt = lambda x, update: x # utils.get_vec_normalize(envs)._obfilt gl = discr.update(gail_train_loader, rollouts, obsfilt) print("Gail loss:", gl) for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( receipts.redeem(rollouts.obs[step]), rollouts.actions[step], args.gamma, rollouts.masks[step], ) rollouts.compute_returns( next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits, ) value_loss, action_loss, dist_entropy = agent.update(rollouts) obs_shape = rollouts.obs.size()[2:] obs = rollouts.obs[:-1].view(-1, *obs_shape) obs = obs[torch.randint(0, obs.size(0), (1, 32))] rollouts.after_update() receipts.prune(rollouts.obs) # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass model_path = os.path.join(save_path, args.env_name + ".pt") torch.save(actor_critic, model_path) print("Saved model:", model_path) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format( j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss, )) from pprint import pprint pprint(LevelTracker.global_scoreboard) # tensorboard_writer.add_histogram( # "task_ranks", torch.tensor(predictor._difficulty_rank), total_num_steps # ) 
tensorboard_writer.add_histogram("value", value, total_num_steps) tensorboard_writer.add_histogram("x", actor_critic.base.last_x, total_num_steps) tensorboard_writer.add_histogram("query", actor_critic.base.last_query, total_num_steps) tensorboard_writer.add_histogram("inputs_at", actor_critic.base.last_inputs_at, total_num_steps) tensorboard_writer.add_scalar("mean_reward", np.mean(episode_rewards), total_num_steps) tensorboard_writer.add_scalar("median_reward", np.median(episode_rewards), total_num_steps) tensorboard_writer.add_scalar("min_reward", np.min(episode_rewards), total_num_steps) tensorboard_writer.add_scalar("max_reward", np.max(episode_rewards), total_num_steps) tensorboard_writer.add_scalar("dist_entropy", dist_entropy, total_num_steps) tensorboard_writer.add_scalar("value_loss", value_loss, total_num_steps) tensorboard_writer.add_scalar("action_loss", action_loss, total_num_steps) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate( actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device, )
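# The MiniWoB loop above stores integer "receipts" in rollout storage and
# redeems them into full graph observations before each forward pass, since
# variable-size graphs do not fit fixed-shape tensors. A minimal sketch of
# that indirection (the real StorageReceipt API is assumed, not shown):
class ReceiptTable:
    def __init__(self):
        self._store, self._next = {}, 0

    def issue(self, obs):
        self._store[self._next] = obs
        self._next += 1
        return self._next - 1

    def redeem(self, handles):
        return [self._store[int(h)] for h in handles]

    def prune(self, live):
        # Drop observations no longer referenced by any stored handle.
        live = {int(h) for h in live}
        self._store = {k: v for k, v in self._store.items() if k in live}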
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") #envs = make_vec_envs(args.env_name, args.seed, args.num_processes, # args.gamma, args.log_dir, device, False) envs = make_parallel_env(args.env_name, args.num_processes, args.seed, True) ''' actor_critic = Policy( envs.observation_space[0].shape, envs.action_space[0], agent_num=args.agent_num, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) ''' actor_critic = [] for i in range(args.agent_num): ac = Policy( envs.observation_space[0].shape, envs.action_space[0], agent_num=args.agent_num, agent_i = i, base_kwargs={'recurrent': args.recurrent_policy}) ac.to(device) actor_critic.append(ac) if args.algo == 'a2c': agent = algo.A2C_ACKTR( actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': ''' agent = algo.PPO( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) ''' agent = [] for i in range(args.agent_num): agent.append(algo.PPO( actor_critic[i], i, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, model_dir = args.model_dir)) elif args.algo == 'acktr': agent = algo.A2C_ACKTR( actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: assert len(envs.observation_space.shape) == 1 discr = gail.Discriminator( envs.observation_space.shape[0] + envs.action_space.shape[0], 100, device) file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format( args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset( file_name, num_trajectories=4, subsample_frequency=20) drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) ''' rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space[0].shape, envs.action_space[0], actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(torch.tensor(obs[:,0,:])) rollouts.to(device) ''' rollouts = [] for i in range(args.agent_num): rollout = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space[0].shape, envs.action_space[0], actor_critic[i].recurrent_hidden_state_size, args.agent_num, i) rollouts.append(rollout) obs = envs.reset() # pdb.set_trace() for i in range(args.agent_num): rollouts[i].share_obs[0].copy_(torch.tensor(obs.reshape(args.num_processes, -1))) rollouts[i].obs[0].copy_(torch.tensor(obs[:,i,:])) rollouts[i].to(device) episode_rewards = deque(maxlen=10) num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes print(num_updates) for j in range(num_updates): #pdb.set_trace() if args.use_linear_lr_decay: # decrease learning rate linearly for i in range(args.agent_num): utils.update_linear_schedule(agent[i].optimizer, j, num_updates, agent[i].optimizer.lr if args.algo == 
"acktr" else args.lr) for step in range(args.num_steps): # Sample actions value_list, action_list, action_log_prob_list, recurrent_hidden_states_list = [], [], [], [] with torch.no_grad(): for i in range(args.agent_num): #pdb.set_trace() value, action, action_log_prob, recurrent_hidden_states = actor_critic[i].act( rollouts[i].share_obs[step], rollouts[i].obs[step], rollouts[i].recurrent_hidden_states[step], rollouts[i].masks[step]) # import pdb; pdb.set_trace() value_list.append(value) action_list.append(action) action_log_prob_list.append(action_log_prob) recurrent_hidden_states_list.append(recurrent_hidden_states) # Obser reward and next obs action = [] for i in range(args.num_processes): one_env_action = [] for k in range(args.agent_num): one_hot_action = np.zeros(envs.action_space[0].n) one_hot_action[action_list[k][i]] = 1 one_env_action.append(one_hot_action) action.append(one_env_action) #start = time.time() #pdb.set_trace() obs, reward, done, infos = envs.step(action) # print(obs[0][0]) # pdb.set_trace() #end = time.time() #print("step time: ", end-start) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. ''' masks = torch.FloatTensor( [[0.0] if done_ else [1.0] for done_ in done[0]]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos[0]]) ''' masks = torch.ones(args.num_processes, 1) bad_masks = torch.ones(args.num_processes, 1) ''' rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) ''' #import pdb; pdb.set_trace() for i in range(args.agent_num): rollouts[i].insert(torch.tensor(obs.reshape(args.num_processes, -1)), torch.tensor(obs[:,i,:]), recurrent_hidden_states, action_list[i], action_log_prob_list[i], value_list[i], torch.tensor(reward[:, i].reshape(-1,1)), masks, bad_masks) #import pdb; pdb.set_trace() with torch.no_grad(): next_value_list = [] for i in range(args.agent_num): next_value = actor_critic[i].get_value( rollouts[i].share_obs[-1], rollouts[i].obs[-1], rollouts[i].recurrent_hidden_states[-1], rollouts[i].masks[-1]).detach() next_value_list.append(next_value) if args.gail: if j >= 10: envs.venv.eval() gail_epoch = args.gail_epoch if j < 10: gail_epoch = 100 # Warm up for _ in range(gail_epoch): discr.update(gail_train_loader, rollouts, utils.get_vec_normalize(envs)._obfilt) for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) for i in range(args.agent_num): rollouts[i].compute_returns(next_value_list[i], args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) #import pdb; pdb.set_trace() for i in range(args.agent_num): value_loss, action_loss, dist_entropy = agent[i].update(rollouts[i]) if (i == 0): print("value loss: " + str(value_loss)) # print(value_loss) # pdb.set_trace() #rollouts.after_update() obs = envs.reset() # pdb.set_trace() for i in range(args.agent_num): rollouts[i].share_obs[0].copy_(torch.tensor(obs.reshape(args.num_processes, -1))) rollouts[i].obs[0].copy_(torch.tensor(obs[:,i,:])) rollouts[i].to(device) # save for every interval-th episode or for the last epoch #pdb.set_trace() if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) if not os.path.exists(save_path + args.model_dir): os.makedirs(save_path + args.model_dir) for i in range(args.agent_num): 
                torch.save([
                    actor_critic[i],
                    getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
                ], save_path + args.model_dir + '/agent_%i' % (i + 1) + ".pt")

        '''
        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * args.num_processes * args.num_steps
            end = time.time()
            print(
                "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n"
                .format(j, total_num_steps,
                        int(total_num_steps / (end - start)),
                        len(episode_rewards), np.mean(episode_rewards),
                        np.median(episode_rewards), np.min(episode_rewards),
                        np.max(episode_rewards), dist_entropy, value_loss,
                        action_loss))
        '''
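# The inner loops above expand each agent's discrete action into a one-hot
# vector per process. A vectorized equivalent with the same output layout,
# assuming each entry of action_list is a [num_processes, 1] LongTensor:
import numpy as np

def one_hot_joint_actions(action_list, n_actions):
    acts = np.stack([a.cpu().numpy().squeeze(-1) for a in action_list], axis=1)
    return np.eye(n_actions)[acts]  # [num_processes, agent_num, n_actions]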
def save_traj_perf(): args = get_args() method_type = "RL" method_name = args.algo.upper() hypers = "ec%0.5f" % args.entropy_coef exp_name = "%s-%s_s%d" % (method_name, hypers, 1) model_name = "%s-%s" % (args.env_name, exp_name) save_path = os.path.join(args.save_dir, args.algo.upper(), args.env_name) traj_deterministic = args.traj_deterministic ## Approximated relative performance w.r.t. the expert performance: ## 1.0 is to save expert data, 0.4 is to save non-expert data with performance ~40% of expert, 0.0 is a random initial policy. ## Choose one from 1.0, 0.4, 0.3, 0.2, 0.1, 0.0. (Looping through the list makes rng inconsistent). perf_list = [0.0] demo_file_size = 10000 clipob = 10 #default in baselines code epsilon = 1e-8 max_step = 1000 for perf in perf_list: torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True np.random.seed(args.seed) random.seed(args.seed) env = make_vec_envs(args.env_name, args.seed + 1000, 1, None, None, device='cpu', allow_early_resets=False) state_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] print("State dim: %d, action dim: %d" % (state_dim, action_dim)) if args.env_name == "HalfCheetahBulletEnv-v0": if perf == 1.0: load_step = 10000000 # 2500 if perf == 0.4: load_step = 576640 # 1300 if perf == 0.3: load_step = 384640 # 1000 if perf == 0.2: load_step = 256640 # 700 if perf == 0.1: load_step = 128640 # -1100 if perf == 0.0: load_step = 640 # -1000 if args.env_name == "AntBulletEnv-v0": if perf == 1.0: load_step = 10000000 # 3500 if perf == 0.4: load_step = 704640 # 1400 if perf == 0.3: load_step = 576640 # 1000 if perf == 0.2: load_step = 384640 # 700 if perf == 0.1: load_step = 128640 # 400 if perf == 0.0: load_step = 640 # 30 if args.env_name == "HopperBulletEnv-v0": if perf == 1.0: load_step = 10000000 # 2300 if perf == 0.4: load_step = 4416640 # 1100 if perf == 0.3: load_step = 384640 # 1000 if perf == 0.2: load_step = 256640 # 900 if perf == 0.1: load_step = 128640 # 600 if perf == 0.0: load_step = 640 # 40 if args.env_name == "Walker2DBulletEnv-v0": if perf == 1.0: load_step = 10000000 # 2700 if perf == 0.4: load_step = 1024640 # 800 if perf == 0.3: load_step = 576640 # 600 if perf == 0.2: load_step = 384640 # 700 if perf == 0.1: load_step = 256640 # 100 if perf == 0.0: load_step = 640 # 16 # We need to use the same statistics for normalization as used in training actor_critic = None ob_rms = None actor_critic, ob_rms = \ torch.load( os.path.join(save_path, model_name + ("T%d.pt" % load_step))) vec_norm = get_vec_normalize(env) if vec_norm is not None: vec_norm.eval() vec_norm.ob_rms = None # ob_rms will be used manually in order to save unnormalize demonstrations. 
obs_list = [] act_lst = [] nobs_list = [] mask_list = [] reward_list = [] total_step = 0 avg_reward_episode = 0 print(model_name + ("T%d.pt" % load_step)) for i_episode in count(): obs = env.reset() sum_rewards = 0 recurrent_hidden_states = torch.zeros( 1, actor_critic.recurrent_hidden_state_size) masks = torch.zeros(1, 1) t = 0 while True: ## normalize obs used by trained models obs_normalize = np.clip((obs.numpy() - ob_rms.mean) / np.sqrt(ob_rms.var + epsilon), -clipob, clipob) obs_normalize = torch.from_numpy(obs_normalize).float() with torch.no_grad(): value, action, _, recurrent_hidden_states = actor_critic.act( obs_normalize, recurrent_hidden_states, masks, deterministic=traj_deterministic) # Obser reward and next obs (unnormalized) next_obs, reward, done, infos = env.step(action) sum_rewards += reward masks.fill_(0.0 if done else 1.0) obs_list.append(obs.numpy()) act_lst.append(action.numpy()) nobs_list.append(next_obs.numpy()) mask_list.append(int(not done)) reward_list.append(reward.numpy()) obs = next_obs total_step += 1 t += 1 if done: print("Episode %2d: Sum rewards %0.2f, Steps %d" % (i_episode, sum_rewards, t)) break avg_reward_episode += sum_rewards # if i_episode % 10 == 0: # print('Episode %2d reward: %.2f' % (i_episode, sum_rewards)) if total_step >= demo_file_size: break """ save data """ obs_array = np.vstack(obs_list) act_array = np.vstack(act_lst) nobs_array = np.vstack(nobs_list) mask_array = np.vstack(mask_list) reward_array = np.vstack(reward_list) print("Total steps %d, total episode %d, AVG reward: %f" % (total_step, i_episode + 1, avg_reward_episode / (i_episode + 1))) traj_path = "./imitation_data/%s/" % (args.env_name) pathlib.Path(traj_path).mkdir(parents=True, exist_ok=True) traj_filename = traj_path + ("/%s_TRAJ-N%d_P%0.1f" % (args.env_name, demo_file_size, perf)) if traj_deterministic: traj_filename += "_det" else: traj_filename += "_sto" hf = h5py.File(traj_filename + ".h5", 'w') hf.create_dataset('model_file', data=model_name + ("T%d.pt" % load_step)) hf.create_dataset('obs_array', data=obs_array) hf.create_dataset('act_array', data=act_array) hf.create_dataset('nobs_array', data=nobs_array) hf.create_dataset('mask_array', data=mask_array) hf.create_dataset('reward_array', data=reward_array) hf.create_dataset('obs_rms_mean', data=ob_rms.mean) hf.create_dataset('obs_rms_var', data=ob_rms.var) print("TRAJs are saved as %s" % traj_filename)
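# traj_deterministic above toggles whether demonstrations are greedy or
# sampled. A sketch of the usual branch inside act(): deterministic rollouts
# take the distribution mode (the mean for a Gaussian policy), while
# stochastic rollouts keep exploration noise:
import torch
from torch.distributions import Normal

def select_action(dist, deterministic):
    return dist.mean if deterministic else dist.sample()

policy_dist = Normal(torch.zeros(1, 6), torch.ones(1, 6))
greedy = select_action(policy_dist, True)     # repeatable expert actions
sampled = select_action(policy_dist, False)   # noisier, lower-return actions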
def main(): args = get_args() torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) if config.cuda and torch.cuda.is_available() and config.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True logger, final_output_dir, tb_log_dir = create_logger(config, args.cfg, 'train', seed=config.seed) eval_log_dir = final_output_dir + "_eval" utils.cleanup_log_dir(final_output_dir) utils.cleanup_log_dir(eval_log_dir) logger.info(pprint.pformat(args)) logger.info(pprint.pformat(config)) writer = SummaryWriter(tb_log_dir) torch.set_num_threads(1) device = torch.device("cuda:" + config.GPUS if config.cuda else "cpu") width = height = 84 envs = make_vec_envs(config.env_name, config.seed, config.num_processes, config.gamma, final_output_dir, device, False, width=width, height=height, ram_wrapper=False) # create agent actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={ 'recurrent': config.recurrent_policy, 'hidden_size': config.hidden_size, 'feat_from_selfsup_attention': config.feat_from_selfsup_attention, 'feat_add_selfsup_attention': config.feat_add_selfsup_attention, 'feat_mul_selfsup_attention_mask': config.feat_mul_selfsup_attention_mask, 'selfsup_attention_num_keypoints': config.SELFSUP_ATTENTION.NUM_KEYPOINTS, 'selfsup_attention_gauss_std': config.SELFSUP_ATTENTION.GAUSS_STD, 'selfsup_attention_fix': config.selfsup_attention_fix, 'selfsup_attention_fix_keypointer': config.selfsup_attention_fix_keypointer, 'selfsup_attention_pretrain': config.selfsup_attention_pretrain, 'selfsup_attention_keyp_maps_pool': config.selfsup_attention_keyp_maps_pool, 'selfsup_attention_image_feat_only': config.selfsup_attention_image_feat_only, 'selfsup_attention_feat_masked': config.selfsup_attention_feat_masked, 'selfsup_attention_feat_masked_residual': config.selfsup_attention_feat_masked_residual, 'selfsup_attention_feat_load_pretrained': config.selfsup_attention_feat_load_pretrained, 'use_layer_norm': config.use_layer_norm, 'selfsup_attention_keyp_cls_agnostic': config.SELFSUP_ATTENTION.KEYPOINTER_CLS_AGNOSTIC, 'selfsup_attention_feat_use_ln': config.SELFSUP_ATTENTION.USE_LAYER_NORM, 'selfsup_attention_use_instance_norm': config.SELFSUP_ATTENTION.USE_INSTANCE_NORM, 'feat_mul_selfsup_attention_mask_residual': config.feat_mul_selfsup_attention_mask_residual, 'bottom_up_form_objects': config.bottom_up_form_objects, 'bottom_up_form_num_of_objects': config.bottom_up_form_num_of_objects, 'gaussian_std': config.gaussian_std, 'train_selfsup_attention': config.train_selfsup_attention, 'block_selfsup_attention_grad': config.block_selfsup_attention_grad, 'sep_bg_fg_feat': config.sep_bg_fg_feat, 'mask_threshold': config.mask_threshold, 'fix_feature': config.fix_feature }) # init / load parameter if config.MODEL_FILE: logger.info('=> loading model from {}'.format(config.MODEL_FILE)) state_dict = torch.load(config.MODEL_FILE) state_dict = OrderedDict( (_k, _v) for _k, _v in state_dict.items() if 'dist' not in _k) actor_critic.load_state_dict(state_dict, strict=False) elif config.RESUME: checkpoint_file = os.path.join(final_output_dir, 'checkpoint.pth') if os.path.exists(checkpoint_file): logger.info("=> loading checkpoint '{}'".format(checkpoint_file)) checkpoint = torch.load(checkpoint_file) actor_critic.load_state_dict(checkpoint['state_dict']) logger.info("=> loaded checkpoint '{}' (epoch {})".format( checkpoint_file, checkpoint['epoch'])) actor_critic.to(device) if config.algo == 'a2c': agent = algo.A2C_ACKTR( actor_critic, 
            config.value_loss_coef,
            config.entropy_coef,
            lr=config.lr,
            eps=config.eps,
            alpha=config.alpha,
            max_grad_norm=config.max_grad_norm,
            train_selfsup_attention=config.train_selfsup_attention)
    elif config.algo == 'ppo':
        agent = algo.PPO(actor_critic,
                         config.clip_param,
                         config.ppo_epoch,
                         config.num_mini_batch,
                         config.value_loss_coef,
                         config.entropy_coef,
                         lr=config.lr,
                         eps=config.eps,
                         max_grad_norm=config.max_grad_norm)
    elif config.algo == 'acktr':
        agent = algo.A2C_ACKTR(
            actor_critic,
            config.value_loss_coef,
            config.entropy_coef,
            acktr=True,
            train_selfsup_attention=config.train_selfsup_attention,
            max_grad_norm=config.max_grad_norm)

    # rollout storage for environment interactions
    rollouts = RolloutStorage(
        config.num_steps,
        config.num_processes,
        envs.observation_space.shape,
        envs.action_space,
        actor_critic.recurrent_hidden_state_size,
        keep_buffer=config.train_selfsup_attention,
        buffer_size=config.train_selfsup_attention_buffer_size)

    if config.RESUME:
        if os.path.exists(checkpoint_file):
            agent.optimizer.load_state_dict(checkpoint['optimizer'])

    obs = envs.reset()
    rollouts.obs[0].copy_(obs)
    rollouts.to(device)

    episode_rewards = deque(maxlen=10)

    start = time.time()
    num_updates = int(
        config.num_env_steps) // config.num_steps // config.num_processes

    best_perf = 0.0
    best_model = False
    # default so the checkpoint dict below never references an undefined
    # name when a save happens before the first evaluation
    perf_indicator = 0.0

    print('num updates', num_updates, 'num steps', config.num_steps)

    for j in range(num_updates):

        if config.use_linear_lr_decay:
            # decrease learning rate linearly
            utils.update_linear_schedule(
                agent.optimizer, j, num_updates,
                agent.optimizer.lr if config.algo == "acktr" else config.lr)

        for step in range(config.num_steps):
            # Sample actions
            with torch.no_grad():
                value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
                    rollouts.obs[step],
                    rollouts.recurrent_hidden_states[step],
                    rollouts.masks[step])
                # this policy variant returns (hidden states, metadata)
                recurrent_hidden_states, meta = recurrent_hidden_states

            # Observe reward and next obs
            obs, reward, done, infos = envs.step(action)

            objects_locs = []
            for info in infos:
                if 'episode' in info.keys():
                    episode_rewards.append(info['episode']['r'])

            # If done then clean the history of observations.
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            bad_masks = torch.FloatTensor(
                [[0.0] if 'bad_transition' in info.keys() else [1.0]
                 for info in infos])
            if objects_locs:
                objects_locs = torch.FloatTensor(objects_locs)
                objects_locs = objects_locs * 2 - 1  # rescale to [-1, 1]
            else:
                objects_locs = None
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, reward, masks, bad_masks,
                            objects_loc=objects_locs)

        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1],
                rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1],
            ).detach()

        rollouts.compute_returns(next_value, config.use_gae, config.gamma,
                                 config.gae_lambda,
                                 config.use_proper_time_limits)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)
        rollouts.after_update()

        if config.train_selfsup_attention and j > 15:
            for _iter in range(config.num_steps // 5):
                frame_x, frame_y = rollouts.generate_pair_image()
                selfsup_attention_loss, selfsup_attention_output, image_b_keypoints_maps = \
                    agent.update_selfsup_attention(frame_x, frame_y,
                                                   config.SELFSUP_ATTENTION)

        if j % config.log_interval == 0 and len(episode_rewards) > 1:
            total_num_steps = (j + 1) * config.num_processes * config.num_steps
            end = time.time()
            msg = 'Updates {}, num timesteps {}, FPS {} \n' \
                  'Last {} training episodes: mean/median reward {:.1f}/{:.1f} ' \
                  'min/max reward {:.1f}/{:.1f} ' \
                  'dist entropy {:.1f}, value loss {:.1f}, action loss {:.1f}\n'.format(
                      j, total_num_steps,
                      int(total_num_steps / (end - start)),
                      len(episode_rewards), np.mean(episode_rewards),
                      np.median(episode_rewards), np.min(episode_rewards),
                      np.max(episode_rewards), dist_entropy, value_loss,
                      action_loss)
            if config.train_selfsup_attention and j > 15:
                msg = msg + 'selfsup attention loss {:.5f}\n'.format(
                    selfsup_attention_loss)
            logger.info(msg)

        if (config.eval_interval is not None and len(episode_rewards) > 1
                and j % config.eval_interval == 0):
            total_num_steps = (j + 1) * config.num_processes * config.num_steps
            ob_rms = getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
            eval_mean_score, eval_max_score, eval_scores = evaluate(
                actor_critic, ob_rms, config.env_name, config.seed,
                config.num_processes, eval_log_dir, device,
                width=width, height=height)
            perf_indicator = eval_mean_score

            best_model = perf_indicator > best_perf
            if best_model:
                best_perf = perf_indicator

            # record test scores
            with open(os.path.join(final_output_dir, 'test_scores'),
                      'a+') as f:
                out_s = "TEST: {}, {}, {}, {}\n".format(
                    str(total_num_steps), str(eval_mean_score),
                    str(eval_max_score),
                    [str(_eval_scores) for _eval_scores in eval_scores])
                print(out_s, end="", file=f)
                logger.info(out_s)
            writer.add_scalar('data/mean_score', eval_mean_score,
                              total_num_steps)
            writer.add_scalar('data/max_score', eval_max_score,
                              total_num_steps)
            writer.add_scalars('test', {'mean_score': eval_mean_score},
                               total_num_steps)

        # save for every interval-th update or for the last update
        if (j % config.save_interval == 0
                or j == num_updates - 1) and config.save_dir != "":
            logger.info("=> saving checkpoint to {}".format(final_output_dir))
            epoch = j / config.save_interval
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': get_model_name(config),
                    'state_dict': actor_critic.state_dict(),
                    'perf': perf_indicator,
                    'optimizer': agent.optimizer.state_dict(),
                    'ob_rms': getattr(utils.get_vec_normalize(envs),
                                      'ob_rms', None)
                }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth')
    logger.info(
        '=> saving final model state to {}'.format(final_model_state_file))
    torch.save(actor_critic.state_dict(), final_model_state_file)

    # export_scalars_to_json needs results from add_scalars
    writer.export_scalars_to_json(os.path.join(tb_log_dir,
                                               'all_scalars.json'))
    writer.close()
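# --- Illustrative helper (not part of the original script) ----------------
# A sketch of what utils.update_linear_schedule, called in the training
# loops above and below, is assumed to do (following the standard
# pytorch-a2c-ppo-acktr recipe): anneal the learning rate linearly from
# initial_lr down to zero over the full run.
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
    lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr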
def main(): realEval = True #False gettrace = getattr(sys, 'gettrace', None) parser = argparse.ArgumentParser(description='RL') parser.add_argument('--action-type', type=int, default=-1, help='action type to play (default: -1)') parser.add_argument('--tasks-difficulty-from', type=int, default=0, help='tasks_difficulty_from') parser.add_argument('--tasks-difficulty-to', type=int, default=100000, help='tasks-difficulty-to') parser.add_argument('--verboseLevel', type=int, default=5, help='verboseLevel') parser.add_argument('--filesNamesSuffix', default="", help='filesNamesSuffix') parser.add_argument('--nobest-exit', type=int, default=10000, help='nobest_exit') args = get_args(parser) args.algo = 'ppo' args.env_name = 'QuadruppedWalk-v1' #'RoboschoolAnt-v1' #'QuadruppedWalk-v1' #'RoboschoolAnt-v1' #'QuadruppedWalk-v1' args.use_gae = True args.num_steps = 2048 #args.num_processes = 4 args.num_processes = 4 if gettrace(): args.num_processes = 1 args.lr = 0.0001 args.entropy_coef = 0.0 args.value_loss_coef = 0.5 args.ppo_epoch = 4 args.num_mini_batch = 256 args.gamma = 0.99 args.gae_lambda = 0.95 args.clip_param = 0.2 args.use_linear_lr_decay = True #True #True #True #True args.use_proper_time_limits = True args.save_dir = "./trained_models/" + args.env_name + "/" args.load_dir = "./trained_models/" + args.env_name + "/" args.log_dir = "./logs/robot" if gettrace(): args.save_dir = "./trained_models/" + args.env_name + "debug/" args.load_dir = "./trained_models/" + args.env_name + "debug/" args.log_dir = "./logs/robot_d" args.log_interval = 30 args.hidden_size = 64 args.last_hidden_size = args.hidden_size args.recurrent_policy = False #True args.save_interval = 20 #args.seed = 1 reward_shaping = 0.01 allowMutate = False if args.seed == -1: args.seed = time.clock_gettime_ns(time.CLOCK_REALTIME) quadruppedEnv.settings.tasks_difficulty_from = args.tasks_difficulty_from quadruppedEnv.settings.tasks_difficulty_to = args.tasks_difficulty_to # 0 is a walk # 1 is a balance # 2 multitasks # 3 multitask experiments trainType = 14 filesNamesSuffix = "" if args.action_type >= 0: trainType = args.action_type makeEnvFunction = makeEnv.make_env_with_best_settings if trainType == 1: filesNamesSuffix = "balance_" makeEnvFunction = makeEnv.make_env_for_balance if trainType == 2: filesNamesSuffix = "analytical_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_analytical if trainType == 3: filesNamesSuffix = "analytical2_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_analytical2 if trainType == 4: filesNamesSuffix = "frontback_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_front_back if trainType == 5: filesNamesSuffix = "leftright_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_left_right if trainType == 6: filesNamesSuffix = "all_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_all if trainType == 7: filesNamesSuffix = "rotate_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_rotate if trainType == 8: filesNamesSuffix = "compound_" makeEnvFunction = make_env_multinetwork if trainType == 9: import pickle realEval = False allowMutate = False args.use_linear_lr_decay = True #False args.num_env_steps = 5000000 filesNamesSuffix = "test_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_test if trainType == 10: import pickle realEval = False allowMutate = False args.use_linear_lr_decay = True #False args.num_env_steps = 5000000 filesNamesSuffix = "zoo_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_test_zoo if trainType == 11: 
args.hidden_size = 128 #64 #128 args.last_hidden_size = args.hidden_size import pickle if gettrace(): args.num_processes = 1 else: args.num_processes = 8 realEval = False allowMutate = False args.lr = 0.00001 args.use_linear_lr_decay = True #False args.num_env_steps = 10000000 filesNamesSuffix = "zigote2_updown_" print("Samples preload") global samplesEnvData samplesEnvData = pickle.load( open("./QuadruppedWalk-v1_MoveNoPhys.samples", "rb")) # samplesEnvData = pickle.load( open( "./QuadruppedWalk-v1.samples", "rb" ) ) makeEnvFunction = makeSamplesEnv if trainType == 12: import pickle args.lr = 0.00001 args.hidden_size = 64 args.last_hidden_size = args.hidden_size filesNamesSuffix = "zigote2_front_back_" args.clip_param = 0.9 args.value_loss_coef = 0.9 makeEnvFunction = makeEnv.make_env_with_best_settings_for_train #makeEnvFunction = makeEnv.make_env_with_best_settings_for_record #makeEnv.samplesEnvData = pickle.load( open( "./QuadruppedWalk-v1_MoveNoPhys.samples", "rb" ) ) if trainType == 13: filesNamesSuffix = "all_bytasks_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_all if trainType == 14: #args.lr = 0.00001 #args.num_env_steps = 000000 #args.clip_param = 0.5 #args.value_loss_coef =0.8 #random.seed(time.clock_gettime_ns(time.CLOCK_REALTIME)) #args.num_steps = random.choice([256,512,1024,2048,4096]) #args.num_mini_batch = random.choice([32,64,256,512]) #args.ppo_epoch = random.choice([2,4,8,10]) #args.clip_param = random.choice([0.2,0.4,0.6,0.8]) #args.value_loss_coef =random.choice([0.4,0.5,0.6,0.8]) #args.lr = random.choice([0.00001,0.0001,0.00005,0.0005]) args.num_steps = 2048 args.num_mini_batch = 64 args.ppo_epoch = 8 args.lr = 0.0001 args.hidden_size = 64 args.last_hidden_size = args.hidden_size # filesNamesSuffix = args.filesNamesSuffix makeEnvFunction = makeEnv.make_env_with_best_settings_for_all ''' num_steps: 1024 num_mini_batch 64 ppo_epoch 2 clip_param: 0.2 value_loss_coef 0.6 lr 0.0001 ''' if trainType == 15: args.num_env_steps = 5000000 filesNamesSuffix = "zigote_updown_" makeEnvFunction = makeEnv.make_env_with_best_settings_for_train_analytic if trainType == 16: args.lr = 0.00001 filesNamesSuffix = "compound_tasks_" makeEnvFunction = make_env_multinetwork reward_shaper = DefaultRewardsShaper(scale_value=reward_shaping) print("ActionType ", trainType, " ", filesNamesSuffix, "seed", args.seed, "num env steps:", args.num_env_steps, " tasks_dif", args.tasks_difficulty_from, args.tasks_difficulty_to) print("Num processes:", args.num_processes) print("num_steps:", args.num_steps, "num_mini_batch", args.num_mini_batch, "ppo_epoch", args.ppo_epoch) print("clip_param:", args.clip_param, "value_loss_coef", args.value_loss_coef, "lr", args.lr) random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True args.log_dir = "/tmp/tensorboard/" #TesnorboardX writer = SummaryWriter(log_dir=args.log_dir + 'runs/{}_PPO_{}_{}'.format( datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name, "ppo")) writer.add_scalar('options/num_steps', args.num_steps, 0) writer.add_scalar('options/num_mini_batch', args.num_mini_batch, 0) writer.add_scalar('options/ppo_epoch', args.ppo_epoch, 0) writer.add_scalar('options/clip_param', args.clip_param, 0) writer.add_scalar('options/value_loss_coef', args.value_loss_coef, 0) writer.add_scalar('options/lr', args.lr, 0) device = torch.device("cuda:0" if 
                          args.cuda else "cpu")
    torch.set_num_threads(1)

    load_dir = os.path.join(args.load_dir, args.algo)

    multiNetworkName = ["frontback_", "all_", "leftright_", "rotate_"]
    if trainType == 8:
        for net in multiNetworkName:
            bestFilename = os.path.join(
                load_dir,
                "{}_{}{}_best.pt".format(args.env_name, net,
                                         args.hidden_size))
            ac, _ = torch.load(bestFilename)
            policies.append(PPOPlayer(ac, device))
            print("Policy multi loaded: ", bestFilename)

    # per-task sub-policies, indexed 0..12
    multiNetworkName2 = ["all_bytasks_{}_".format(i) for i in range(13)]
    if trainType == 16:
        for net in multiNetworkName2:
            bestFilename = os.path.join(
                load_dir,
                "{}_{}{}_best.pt".format(args.env_name, net,
                                         args.hidden_size))
            ac, _ = torch.load(bestFilename)
            policies.append(PPOPlayer(ac, device))
            print("Policy multi loaded: ", bestFilename)

    envs = make_vec_envs(args.env_name,
                         args.seed,
                         args.num_processes,
                         args.gamma,
                         None,
                         device,
                         False,
                         normalizeOb=False,
                         normalizeReturns=False,
                         max_episode_steps=args.num_steps,
                         makeEnvFunc=makeEnvFunction,
                         num_frame_stack=1,
                         info_keywords=(
                             'episode_steps',
                             'episode_reward',
                             'progress',
                             'servo',
                             'distToTarget',
                         ))

    actor_critic = Policy(envs.observation_space.shape,
                          envs.action_space,
                          base_kwargs={
                              'recurrent': args.recurrent_policy,
                              'hidden_size': args.hidden_size,
                              'last_hidden_size': args.last_hidden_size,
                              'activation_layers_type': "Tanh"
                          })

    load_path = os.path.join(
        load_dir, "{}_{}{}_best.pt".format(args.env_name, filesNamesSuffix,
                                           args.hidden_size))

    pretrained_path = "../Train/trained_models/QuadruppedWalk-v1/Train_QuadruppedWalk-v1_256.pth"
    loadPretrained = False
    if loadPretrained and os.path.isfile(pretrained_path):
        print("Loading pretrained weights from", pretrained_path)
        # the checkpoint is assumed to hold a state_dict for the policy base
        state_dict = torch.load(pretrained_path)
        actor_critic.base.load_state_dict(state_dict)
        actor_critic.base.eval()

    if os.path.isfile(load_path) and not loadPretrained:
        actor_critic, ob_rms = torch.load(load_path)
        actor_critic.eval()
        print("----NN loaded: ", load_path, " -----")
    else:
        bestFilename = os.path.join(
            load_dir,
            "{}_{}{}_best_pretrain.pt".format(args.env_name,
                                              filesNamesSuffix,
                                              args.hidden_size))
        if os.path.isfile(bestFilename):
            actor_critic, ob_rms = torch.load(bestFilename)
            actor_critic.eval()
            print("----NN loaded: ", bestFilename, " -----")

    maxReward = -10000.0
    maxSteps = 0
    minDistance = 50000.0

    actor_critic.to(device)

    agent = algo.PPO(actor_critic,
                     args.clip_param,
                     args.ppo_epoch,
                     args.num_mini_batch,
                     args.value_loss_coef,
                     args.entropy_coef,
                     lr=args.lr,
                     eps=args.eps,
                     max_grad_norm=args.max_grad_norm)

    if args.gail:
        assert len(envs.observation_space.shape) == 1
        discr = gail.Discriminator(
            envs.observation_space.shape[0] + envs.action_space.shape[0],
            100, device)
        file_name = os.path.join(
            args.gail_experts_dir,
            "trajs_{}.pt".format(args.env_name.split('-')[0].lower()))
        gail_train_loader = torch.utils.data.DataLoader(
            gail.ExpertDataset(file_name,
                               num_trajectories=4,
                               subsample_frequency=20),
            batch_size=args.gail_batch_size,
            shuffle=True,
            drop_last=True)

    rollouts = RolloutStorage(args.num_steps, args.num_processes,
                              envs.observation_space.shape,
envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) deque_maxLen = 10 episode_rewards = deque(maxlen=deque_maxLen) episode_steps = deque(maxlen=deque_maxLen) episode_rewards_alive = deque(maxlen=deque_maxLen) episode_rewards_progress = deque(maxlen=deque_maxLen) episode_rewards_servo = deque(maxlen=deque_maxLen) episode_dist_to_target = deque(maxlen=deque_maxLen) ''' load_path = os.path.join(args.load_dir, args.algo) load_path = os.path.join(load_path, args.env_name + ".pt") actor_critic, ob_rms = torch.load(load_path) actor_critic.to(device) actor_critic.eval() #ob_rms.eval() ''' ''' args.use_gym_monitor = 1 args.monitor_dir = "./results/" monitor_path = os.path.join(args.monitor_dir, args.algo) monitor_path = os.path.join(monitor_path, args.env_name) args. if args.use_gym_monitor: env = wrappers.Monitor( env, monitor_path, video_callable=False, force=True) ''' i_episode = 0 save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass trainOnSamplesAndExit = False #False if trainOnSamplesAndExit: import pickle print("---------------------------------------") print("Samples preload") data = pickle.load(open("./QuadruppedWalk-v1_UpDown.samples", "rb")) #data = pickle.load( open( "../QuadruppedWalk-v1_NN.samples", "rb" ) ) learning_rate = 0.0001 max_episodes = 100 max_timesteps = 4000 betas = (0.9, 0.999) log_interval = 1 envSamples = SamplesEnv(data) envSamples.numSteps = max_timesteps # create a stochastic gradient descent optimizer optimizer = torch.optim.Adam(actor_critic.base.actor.parameters(), lr=learning_rate, betas=betas) #optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) # create a loss function criterion = nn.MSELoss(reduction="sum") # run the main training loop for epoch in range(max_episodes): state = envSamples.reset() time_step = 0 testReward = 0 testSteps = 0 loss_sum = 0 loss_max = 0 for t in range(max_timesteps): time_step += 1 nn_state = torch.FloatTensor((state).reshape(1, -1)).to(device) optimizer.zero_grad() net_out = actor_critic.base.forwardActor(nn_state) net_out = actor_critic.dist.fc_mean(net_out) state, reward, done, info = envSamples.step( net_out.detach().numpy()) sim_action = envSamples.recordedActions sim_action_t = torch.FloatTensor([sim_action]).to(device) loss = criterion(net_out, sim_action_t) loss.backward() optimizer.step() loss_sum += loss.mean() loss_max = max(loss_max, loss.max()) testReward += reward testSteps += 1 if done: if epoch % log_interval == 0: #print(best_action_t*scaleActions-net_out*scaleActions) if args.verboseLevel > 0: print( 'Train Episode: {} t:{} Reward:{} Loss: mean:{:.6f} max: {:.6f}' .format(epoch, t, testReward, loss_sum / t, loss_max)) print(info) reward = 0 break bestFilename = os.path.join( save_path, "{}_{}{}_best_pretrain.pt".format(args.env_name, filesNamesSuffix, args.hidden_size)) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], bestFilename) exit(0) skipWriteBest = True if args.verboseLevel > 0: printNetwork(actor_critic.base.actor) lock(actor_critic, first=False, last=False) #if trainType==9: #allowMutate = False #lock(actor_critic,first=True,last=False) #mutate(actor_critic,power=0.00,powerLast=0.3) if args.verboseLevel > 0: printNetwork(actor_critic.base.actor) #from torchsummary import summary #summary(actor_critic.base.actor, (1, 48, 64)) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes 
episodeBucketIndex = 0 maxReward = -10000000000 numEval = 10 if realEval: envEval = makeEnvFunction(args.env_name) if hasattr(envEval.env, "tasks") and len(envEval.env.tasks): numEval = max(numEval, len(envEval.env.tasks)) maxReward = evaluate_policy(envEval, actor_critic, numEval * 2, render=False, device=device, verbose=args.verboseLevel) print("MaxReward on start", maxReward) noMaxRewardCount = 0 updateIndex = 0 for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) episode_r = 0.0 stepsDone = 0 for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Obser reward and next obs obs, reward, done, infos = envs.step(action) #envs.venv.venv.venv.envs[0].render() if args.verboseLevel > 0: index = 0 for d in done: if d: print(infos[index], flush=True) index += 1 episodeDone = False ''' index = 0 for d in done: if d: print("") print(infos[index]) index+=1 ''' for info in infos: if 'reward' in info.keys(): episodeDone = True i_episode += 1 episode_rewards.append(info['reward']) writer.add_scalar('reward/episode', info['reward'], i_episode) #print("E:",i_episode," T:",info['episode_steps'], " R:", info['episode_reward'], " D:",info['distToTarget']) if 'steps' in info.keys(): episode_steps.append(info['steps']) writer.add_scalar('reward/steps', info['steps'], i_episode) if 'alive' in info.keys(): episode_rewards_alive.append(info['alive']) writer.add_scalar('reward/alive', info['alive'], i_episode) if 'prog' in info.keys(): episode_rewards_progress.append(info['prog']) writer.add_scalar('reward/progress', info['prog'], i_episode) if 'servo' in info.keys(): episode_rewards_servo.append(info['servo']) writer.add_scalar('reward/servo', info['servo'], i_episode) if 'd2T' in info.keys(): episode_dist_to_target.append(info['d2T']) writer.add_scalar('reward/distToTarget', info['d2T'], i_episode) for val in info.keys(): if val not in [ "reward", "steps", "alive", "prog", "servo", "d2T", 'epos', 't' ]: writer.add_scalar('reward/' + val, info[val], i_episode) #if episodeDone and i_episode%10==0: # print(i_episode,"({:.1f}/{}/{:.2f}) ".format(episode_rewards[-1],episode_steps[-1],episode_dist_to_target[-1]),end='',flush=True) if episodeDone: episodeBucketIndex += 1 if args.verboseLevel > 0: print("Mean:", Fore.WHITE, np.mean(episode_rewards), Style.RESET_ALL, " Median:", Fore.WHITE, np.median(episode_rewards), Style.RESET_ALL, " max reward:", maxReward) #'''len(episode_rewards) and np.mean(episode_rewards)>maxReward and''' if realEval: if episodeBucketIndex % args.log_interval == 0 and episodeBucketIndex > args.log_interval: print("Step:", (j + 1) * args.num_processes * args.num_steps) if skipWriteBest == False: evalReward = evaluate_policy( envEval, actor_critic, numEval, device=device, verbose=args.verboseLevel) writer.add_scalar('reward/eval', evalReward, i_episode) if evalReward > maxReward: maxReward = evalReward #maxReward = np.mean(episode_rewards) bestFilename = os.path.join( save_path, "{}_{}{}_best.pt".format( args.env_name, filesNamesSuffix, args.hidden_size)) print( "Writing best reward:", Fore.GREEN, "({:.1f}/{:.1f}/{:.1f}/{}/{:.2f}) ".format( maxReward, np.mean(episode_rewards), np.median(episode_rewards), np.mean(episode_steps), episode_dist_to_target[-1]), 
Style.RESET_ALL, bestFilename) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], bestFilename) noMaxRewardCount = 0 else: noMaxRewardCount += 1 if allowMutate: if noMaxRewardCount == 5: print("Mutation low last layer") lock(actor_critic, first=False, last=False) mutate(actor_critic, power=0.00, powerLast=0.01) if noMaxRewardCount == 8: print("Mutation low non last") lock(actor_critic, first=False, last=False) mutate(actor_critic, power=0.01, powerLast=0.0) if noMaxRewardCount == 11: print("Mutation low all") lock(actor_critic, first=False, last=False) mutate(actor_critic, power=0.02, powerLast=0.2) if noMaxRewardCount == 14: print("Mutation hi all") lock(actor_critic, first=False, last=False) mutate(actor_critic, power=0.03, powerLast=0.03) noMaxRewardCount = 0 if noMaxRewardCount == args.nobest_exit: exit(0) else: skipWriteBest = False else: if len(episode_rewards) and np.mean( episode_rewards ) > maxReward and j > args.log_interval: if skipWriteBest == False: maxReward = np.mean(episode_rewards) writer.add_scalar('reward/maxReward', maxReward, i_episode) bestFilename = os.path.join( save_path, "{}_{}{}_best.pt".format( args.env_name, filesNamesSuffix, args.hidden_size)) if len(episode_dist_to_target): print( "Writing best reward:", Fore.GREEN, "({:.1f}/{:.1f}/{}/{:.2f}) ".format( np.mean(episode_rewards), np.median(episode_rewards), np.mean(episode_steps), episode_dist_to_target[-1]), Style.RESET_ALL, bestFilename) else: print( "Writing best reward:", Fore.GREEN, "({:.1f}/{:.1f}/{}) ".format( np.mean(episode_rewards), np.median(episode_rewards), np.mean(episode_steps)), Style.RESET_ALL, bestFilename) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], bestFilename) else: skipWriteBest = False # If done then clean the history of observations. 
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) shaped_reward = reward_shaper(reward) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, shaped_reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() if args.gail: if j >= 10: envs.venv.eval() gail_epoch = args.gail_epoch if j < 10: gail_epoch = 100 # Warm up for _ in range(gail_epoch): discr.update(gail_train_loader, rollouts, utils.get_vec_normalize(envs)._obfilt) for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts) writer.add_scalar('reward/value_loss', value_loss, updateIndex) writer.add_scalar('reward/action_loss', action_loss, updateIndex) writer.add_scalar('reward/dist_entropy', dist_entropy, updateIndex) updateIndex += 1 rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": ''' fileName = os.path.join(save_path, "{}_{}{}.pt".format(args.env_name,filesNamesSuffix,args.hidden_size)) torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], fileName) print("Saved:",fileName, " cur avg rewards:",np.mean(episode_rewards)) fileName = os.path.join(save_path, "{}_{}{}_actor.pt".format(args.env_name,filesNamesSuffix,args.hidden_size)) torch.save(actor_critic.state_dict, fileName) print("Saved:",fileName) ''' if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() if args.verboseLevel > 0: print("") print("Updates {}, num timesteps {}, FPS {}".format( j, total_num_steps, int(total_num_steps / (end - start)))) print(" Last {} training episodes:".format( len(episode_rewards))) print( " reward mean/median {:.1f}/{:.1f} min/max {:.1f}/{:.1f}". format(np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards))) print(" steps mean/median {:.1f}/{:.1f} min/max {:.1f}/{:.1f}". 
format(np.mean(episode_steps), np.median(episode_steps), np.min(episode_steps), np.max(episode_steps))) if len(episode_rewards_alive): print( " alive mean/median {:.1f}/{:.1f} min/max {:.1f}/{:.1f}" .format(np.mean(episode_rewards_alive), np.median(episode_rewards_alive), np.min(episode_rewards_alive), np.max(episode_rewards_alive))) if len(episode_rewards_progress): print( " progress mean/median {:.1f}/{:.1f} min/max {:.1f}/{:.1f}" .format(np.mean(episode_rewards_progress), np.median(episode_rewards_progress), np.min(episode_rewards_progress), np.max(episode_rewards_progress))) if len(episode_rewards_servo): print( " servo mean/median {:.1f}/{:.1f} min/max {:.1f}/{:.1f}" .format(np.mean(episode_rewards_servo), np.median(episode_rewards_servo), np.min(episode_rewards_servo), np.max(episode_rewards_servo))) if len(episode_dist_to_target): print( " dist to target mean/median {:.3f}/{:.3f} min/max {:.3f}/{:.3f}" .format(np.mean(episode_dist_to_target), np.median(episode_dist_to_target), np.min(episode_dist_to_target), np.max(episode_dist_to_target))) print( " Reward/Steps {:.3f} Progress/Steps: {:.3f} entropy {:.1f} value_loss {:.5f} action_loss {:.5f}\n" .format( np.mean(episode_rewards) / np.mean(episode_steps), (0 if len(episode_rewards_progress) == 0 else np.mean(episode_rewards_progress) / np.mean(episode_steps)), dist_entropy, value_loss, action_loss))
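# --- Illustrative helper (not part of the original script) ----------------
# A sketch of the evaluate_policy helper used above, under the assumption
# that it runs num_episodes deterministic episodes on a single
# (non-vectorized) env and returns the mean episode reward; only the call
# signature is taken from the training loop.
import torch


def evaluate_policy(env, actor_critic, num_episodes, render=False,
                    device=torch.device("cpu"), verbose=0):
    total_reward = 0.0
    for _ in range(num_episodes):
        obs = env.reset()
        recurrent_hidden_states = torch.zeros(
            1, actor_critic.recurrent_hidden_state_size, device=device)
        masks = torch.zeros(1, 1, device=device)
        done = False
        episode_reward = 0.0
        while not done:
            with torch.no_grad():
                _, action, _, recurrent_hidden_states = actor_critic.act(
                    torch.as_tensor(obs, dtype=torch.float32,
                                    device=device).unsqueeze(0),
                    recurrent_hidden_states,
                    masks,
                    deterministic=True)
            obs, reward, done, _ = env.step(action.cpu().numpy()[0])
            masks.fill_(0.0 if done else 1.0)
            episode_reward += reward
            if render:
                env.render()
        if verbose > 0:
            print("eval episode reward: %.2f" % episode_reward)
        total_reward += episode_reward
    return total_reward / num_episodes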
def main(): args = get_args() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") if args.env_name.startswith("lab_"): gym_name, flow_json = make_lab_env(args.env_name) args.env_name = gym_name envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False) actor_critic = Policy( envs.observation_space.shape, envs.action_space, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR( actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR( actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: assert len(envs.observation_space.shape) == 1 discr = gail.Discriminator( envs.observation_space.shape[0] + envs.action_space.shape[0], 100, device) file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format( args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset( file_name, num_trajectories=4, subsample_frequency=20) drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Obser reward and next obs obs, reward, done, infos = envs.step(action) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. 
masks = torch.FloatTensor( [[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() if args.gail: if j >= 10: envs.venv.eval() gail_epoch = args.gail_epoch if j < 10: gail_epoch = 100 # Warm up for _ in range(gail_epoch): discr.update(gail_train_loader, rollouts, utils.get_vec_normalize(envs)._obfilt) for step in range(args.num_steps): rollouts.rewards[step] = discr.predict_reward( rollouts.obs[step], rollouts.actions[step], args.gamma, rollouts.masks[step]) rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: " "mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
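# --- Illustrative helper (not part of the original script) ----------------
# A sketch of the generalized-advantage-estimation branch that
# rollouts.compute_returns(...) above is assumed to implement (the standard
# pytorch-a2c-ppo-acktr recipe); tensor shapes are noted in the comments.
import torch


def compute_gae_returns(rewards, value_preds, masks, gamma, gae_lambda):
    # rewards: [T, N, 1]; value_preds: [T + 1, N, 1] with the bootstrap
    # value last; masks: [T + 1, N, 1], 0.0 where the episode ended.
    returns = torch.zeros_like(rewards)
    gae = 0.0
    for step in reversed(range(rewards.size(0))):
        delta = (rewards[step]
                 + gamma * value_preds[step + 1] * masks[step + 1]
                 - value_preds[step])
        gae = delta + gamma * gae_lambda * masks[step + 1] * gae
        returns[step] = gae + value_preds[step]
    return returns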
def main(): args = get_args() trace_size = args.trace_size toke = tokenizer() torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False) actor_critic = Policy(envs.observation_space.shape, envs.action_space, base_kwargs={'recurrent': args.recurrent_policy}) actor_critic.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() tobs = torch.zeros((args.num_processes, trace_size), dtype=torch.long) #print (tobs.dtype) rollouts.obs[0].copy_(obs) rollouts.tobs[0].copy_(tobs) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.tobs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Obser reward and next obs obs, reward, done, infos = envs.step(action) tobs = [] envs.render() for info in infos: if 'episode' in info.keys(): #print ("episode ", info['episode']) episode_rewards.append(info['episode']['r']) trace = info['trace'][0:trace_size] trace = [x[2] for x in trace] word_to_ix = toke.tokenize(trace) seq = prepare_sequence(trace, word_to_ix) if len(seq) < trace_size: seq = torch.zeros((trace_size), dtype=torch.long) seq = seq[:trace_size] #print (seq.dtype) tobs.append(seq) tobs = torch.stack(tobs) #print (tobs) #print (tobs.size()) # If done then clean the history of observations. 
masks = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, tobs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.tobs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
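# --- Illustrative helpers (not part of the original script) ---------------
# Minimal sketches of the tokenizer / prepare_sequence pair assumed by the
# trace-conditioned variant above: the tokenizer grows a persistent
# token-to-index vocabulary from the traces it sees, and prepare_sequence
# maps a trace to a LongTensor of indices. Index 0 is kept free because the
# training loop pads short traces with zeros. Both are illustrative
# reconstructions, not the original implementations.
import torch


class tokenizer:
    def __init__(self):
        self.word_to_ix = {}

    def tokenize(self, trace):
        for token in trace:
            if token not in self.word_to_ix:
                self.word_to_ix[token] = len(self.word_to_ix) + 1
        return self.word_to_ix


def prepare_sequence(trace, word_to_ix):
    return torch.tensor([word_to_ix[t] for t in trace], dtype=torch.long)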
def main(): args = get_args() writer = SummaryWriter(os.path.join('logs', args.save_name), ) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") envs = make_vec_envs( basic_env.BasicFlatDiscreteEnv, args.seed, args.num_processes, args.gamma, args.log_dir, device, False, task='lift', gripper_type='RobotiqThreeFingerDexterousGripper', robot='Panda', controller='JOINT_TORQUE' if args.vel else 'JOINT_POSITION', horizon=1000, reward_shaping=True) actor_critic = Policy( envs.observation_space.shape, envs.action_space, base=Surreal, # base=OpenAI, # base=MLP_ATTN, base_kwargs={ 'recurrent': args.recurrent_policy, # 'dims': basic_env.BasicFlatEnv().modality_dims 'config': dict(act='relu' if args.relu else 'tanh', rec=args.rec, fc=args.fc) }) print(actor_critic) actor_critic.to(device) if args.algo == 'a2c': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, alpha=args.alpha, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo': agent = algo.PPO(actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'acktr': agent = algo.A2C_ACKTR(actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True) if args.gail: assert len(envs.observation_space.shape) == 1 discr = gail.Discriminator( envs.observation_space.shape[0] + envs.action_space.shape[0], 100, device) file_name = os.path.join( args.gail_experts_dir, "trajs_{}.pt".format(args.env_name.split('-')[0].lower())) expert_dataset = gail.ExpertDataset(file_name, num_trajectories=4, subsample_frequency=20) drop_last = len(expert_dataset) > args.gail_batch_size gail_train_loader = torch.utils.data.DataLoader( dataset=expert_dataset, batch_size=args.gail_batch_size, shuffle=True, drop_last=drop_last) rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=100) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes best_reward = 0 for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) writer.add_scalar('lr', agent.optimizer.param_groups[0]['lr']) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step]) # Obser reward and next obs obs, reward, done, infos = envs.step(action) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. 
            masks = torch.FloatTensor([[0.0] if done_ else [1.0]
                                       for done_ in done])
            bad_masks = torch.FloatTensor(
                [[0.0] if 'bad_transition' in info.keys() else [1.0]
                 for info in infos])
            rollouts.insert(obs, recurrent_hidden_states, action,
                            action_log_prob, value, reward, masks, bad_masks)

        with torch.no_grad():
            next_value = actor_critic.get_value(
                rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
                rollouts.masks[-1]).detach()

        if args.gail:
            if j >= 10:
                envs.venv.eval()
            gail_epoch = args.gail_epoch
            if j < 10:
                gail_epoch = 100  # Warm up
            for _ in range(gail_epoch):
                discr.update(gail_train_loader, rollouts,
                             utils.get_vec_normalize(envs)._obfilt)

            for step in range(args.num_steps):
                rollouts.rewards[step] = discr.predict_reward(
                    rollouts.obs[step], rollouts.actions[step], args.gamma,
                    rollouts.masks[step])

        rollouts.compute_returns(next_value, args.use_gae, args.gamma,
                                 args.gae_lambda,
                                 args.use_proper_time_limits)

        value_loss, action_loss, dist_entropy = agent.update(rollouts)

        end = time.time()
        total_num_steps = (j + 1) * args.num_processes * args.num_steps
        if len(episode_rewards) > 1:
            writer.add_scalar('loss/value', value_loss, total_num_steps)
            writer.add_scalar('loss/policy', action_loss, total_num_steps)
            writer.add_scalar('experiment/num_updates', j, total_num_steps)
            writer.add_scalar('experiment/FPS',
                              int(total_num_steps / (end - start)),
                              total_num_steps)
            writer.add_scalar('experiment/EPISODE MEAN',
                              np.mean(episode_rewards), total_num_steps)
            writer.add_scalar('experiment/EPISODE MEDIAN',
                              np.median(episode_rewards), total_num_steps)
            writer.add_scalar('experiment/EPISODE MIN',
                              np.min(episode_rewards), total_num_steps)
            writer.add_scalar('experiment/EPISODE MAX',
                              np.max(episode_rewards), total_num_steps)

        rollouts.after_update()

        # save whenever the running mean reward reaches a new best
        if len(episode_rewards) > 1 and args.save_dir != "":
            rew = np.mean(episode_rewards)
            if rew > best_reward:
                best_reward = rew
                print('saved with best reward', rew)
                save_path = os.path.join(args.save_dir, args.algo)
                try:
                    os.makedirs(save_path)
                except OSError:
                    pass

                torch.save([
                    actor_critic,
                    getattr(utils.get_vec_normalize(envs), 'obs_rms', None)
                ], os.path.join(save_path, args.save_name + ".pt"))

        if j % args.log_interval == 0 and len(episode_rewards) > 1:
            print(
                "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}, dist entropy {:.2f}, value loss {:.4f}, action loss {:.4f}\n"
                .format(j, total_num_steps,
                        int(total_num_steps / (end - start)),
                        len(episode_rewards), np.mean(episode_rewards),
                        np.median(episode_rewards), np.min(episode_rewards),
                        np.max(episode_rewards), dist_entropy, value_loss,
                        action_loss))

        if (args.eval_interval is not None and len(episode_rewards) > 1
                and j % args.eval_interval == 0):
            obs_rms = utils.get_vec_normalize(envs).obs_rms
            evaluate(actor_critic, obs_rms, args.env_name, args.seed,
                     args.num_processes, eval_log_dir, device)

    writer.close()
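# --- Illustrative helper (not part of the original script) ----------------
# The best-reward checkpoint written above stores a [policy, obs_rms] pair
# via torch.save. A small sketch of how it could be reloaded for
# evaluation; the function name is hypothetical.
import torch


def load_checkpoint(path, device=torch.device("cpu")):
    actor_critic, obs_rms = torch.load(path, map_location=device)
    actor_critic.to(device)
    actor_critic.eval()
    return actor_critic, obs_rms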
def main(): args = get_args() use_ppo = args.algo == 'ppo' torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) if args.cuda and torch.cuda.is_available() and args.cuda_deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True log_dir = os.path.expanduser(args.log_dir) eval_log_dir = log_dir + "_eval" utils.cleanup_log_dir(log_dir) utils.cleanup_log_dir(eval_log_dir) torch.set_num_threads(1) device = torch.device("cuda:0" if args.cuda else "cpu") envs = make_vec_envs(args.env_name, args.seed, args.num_processes, args.gamma, args.log_dir, device, False) actor_critic = Policy( envs.observation_space.shape, envs.action_space, base_kwargs={'recurrent': args.recurrent_policy, 'share_parameter': args.share_parameter}) actor_critic.to(device) return_distributions = False if args.algo == 'ppo': agent = algo.PPO( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm) elif args.algo == 'ppo_rb': agent = algo.PPO_RB( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, args.rb_alpha, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm ) elif args.algo == 'tr_ppo': agent = algo.TR_PPO( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, ppo_clip_param=args.ppo_clip_param ) return_distributions = True elif args.algo == 'tr_ppo_rb': agent = algo.TR_PPO_RB( actor_critic, args.clip_param, args.ppo_epoch, args.num_mini_batch, args.value_loss_coef, args.entropy_coef, args.rb_alpha, lr=args.lr, eps=args.eps, max_grad_norm=args.max_grad_norm, ppo_clip_param=args.ppo_clip_param ) return_distributions = True if not return_distributions: rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size) else: if actor_critic.dist_name == 'DiagGaussian': rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size, distribution_param_dim=envs.action_space.shape[0]*2 ) elif actor_critic.dist_name == 'Bernoulli' or actor_critic.dist_name == 'Categorical': rollouts = RolloutStorage(args.num_steps, args.num_processes, envs.observation_space.shape, envs.action_space, actor_critic.recurrent_hidden_state_size, distribution_param_dim=envs.action_space.n ) obs = envs.reset() rollouts.obs[0].copy_(obs) rollouts.to(device) episode_rewards = deque(maxlen=10) start = time.time() num_updates = int( args.num_env_steps) // args.num_steps // args.num_processes prev_mean_reward = None for j in range(num_updates): if args.use_linear_lr_decay: # decrease learning rate linearly utils.update_linear_schedule( agent.optimizer, j, num_updates, agent.optimizer.lr if args.algo == "acktr" else args.lr) for step in range(args.num_steps): # Sample actions with torch.no_grad(): value, action, action_log_prob, recurrent_hidden_states, parameters = actor_critic.act( rollouts.obs[step], rollouts.recurrent_hidden_states[step], rollouts.masks[step], return_distribution=True) # Obser reward and next obs obs, reward, done, infos = envs.step(action) for info in infos: if 'episode' in info.keys(): episode_rewards.append(info['episode']['r']) # If done then clean the history of observations. 
masks = torch.FloatTensor( [[0.0] if done_ else [1.0] for done_ in done]) bad_masks = torch.FloatTensor( [[0.0] if 'bad_transition' in info.keys() else [1.0] for info in infos]) rollouts.insert(obs, recurrent_hidden_states, action, action_log_prob, value, reward, masks, bad_masks, parameters) with torch.no_grad(): next_value = actor_critic.get_value( rollouts.obs[-1], rollouts.recurrent_hidden_states[-1], rollouts.masks[-1]).detach() rollouts.compute_returns(next_value, args.use_gae, args.gamma, args.gae_lambda, args.use_proper_time_limits) value_loss, action_loss, dist_entropy = agent.update(rollouts, use_ppo) rollouts.after_update() # save for every interval-th episode or for the last epoch if (j % args.save_interval == 0 or j == num_updates - 1) and args.save_dir != "": save_path = os.path.join(args.save_dir, args.algo) try: os.makedirs(save_path) except OSError: pass torch.save([ actor_critic, getattr(utils.get_vec_normalize(envs), 'ob_rms', None) ], os.path.join(save_path, args.env_name + ".pt")) mean_rewards = np.mean(episode_rewards) if (prev_mean_reward is not None) and (mean_rewards < prev_mean_reward) and \ (use_ppo == False) and args.revert_to_ppo and j > 3: use_ppo = True print('Revert Back to PPO Training') # args.lr = 3e-4 prev_mean_reward = mean_rewards if j % args.log_interval == 0 and len(episode_rewards) > 1: total_num_steps = (j + 1) * args.num_processes * args.num_steps end = time.time() print( "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n" .format(j, total_num_steps, int(total_num_steps / (end - start)), len(episode_rewards), np.mean(episode_rewards), np.median(episode_rewards), np.min(episode_rewards), np.max(episode_rewards), dist_entropy, value_loss, action_loss)) if (args.eval_interval is not None and len(episode_rewards) > 1 and j % args.eval_interval == 0): ob_rms = utils.get_vec_normalize(envs).ob_rms evaluate(actor_critic, ob_rms, args.env_name, args.seed, args.num_processes, eval_log_dir, device)
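# --- Illustrative reference (not part of the original script) -------------
# A sketch of the clipped surrogate objective that the plain algo.PPO
# update above is assumed to minimize per minibatch; the rollback (ppo_rb)
# and trust-region (tr_ppo / tr_ppo_rb) variants selected above replace
# this clipping term with their own corrections.
import torch


def ppo_clipped_loss(new_log_probs, old_log_probs, advantages, clip_param):
    ratio = torch.exp(new_log_probs - old_log_probs)
    surr1 = ratio * advantages
    surr2 = torch.clamp(ratio, 1.0 - clip_param,
                        1.0 + clip_param) * advantages
    # maximize the clipped surrogate == minimize its negation
    return -torch.min(surr1, surr2).mean()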