import os

import numpy as np

# NOTE: the import paths below are an assumption based on the checkpoint
# directory used in _enjoy() ('reinforcement/pytorch/models/'); adjust them
# to match where these helpers actually live in your project.
from reinforcement.pytorch.ddpg import DDPG
from reinforcement.pytorch.utils import seed, evaluate_policy, ReplayBuffer
from utils.env import launch_env
from utils.wrappers import NormalizeWrapper, ImgWrapper, DtRewardWrapper, \
    ActionWrapper, ResizeWrapper


def _enjoy():
    # Launch the env with our helper function
    env = launch_env()
    print("Initialized environment")

    # Wrappers
    env = ResizeWrapper(env)
    env = NormalizeWrapper(env)
    env = ImgWrapper(env)  # to make the images from 160x120x3 into 3x160x120
    env = ActionWrapper(env)
    env = DtRewardWrapper(env)
    print("Initialized Wrappers")

    state_dim = env.observation_space.shape
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    # Initialize policy and load the trained weights
    policy = DDPG(state_dim, action_dim, max_action, net_type="cnn")
    policy.load(filename='ddpg', directory='reinforcement/pytorch/models/')

    obs = env.reset()
    done = False

    # Run the trained policy indefinitely, resetting after each episode
    while True:
        while not done:
            action = policy.predict(np.array(obs))
            # Perform action
            obs, reward, done, _ = env.step(action)
            env.render()
        done = False
        obs = env.reset()
def _train(args):
    if not os.path.exists("./results"):
        os.makedirs("./results")
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)

    # Launch the env with our helper function
    env = launch_env()
    print("Initialized environment")

    # Wrappers
    env = ResizeWrapper(env)
    env = NormalizeWrapper(env)
    env = ImgWrapper(env)  # to make the images from 160x120x3 into 3x160x120
    env = ActionWrapper(env)
    env = DtRewardWrapper(env)
    print("Initialized Wrappers")

    # Set seeds
    seed(args.seed)

    state_dim = env.observation_space.shape
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    # Initialize policy
    policy = DDPG(state_dim, action_dim, max_action, net_type="cnn")
    replay_buffer = ReplayBuffer(args.replay_buffer_max_size)
    print("Initialized DDPG")

    # Evaluate untrained policy
    evaluations = [evaluate_policy(env, policy)]

    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True
    episode_reward = None
    env_counter = 0
    reward = 0
    episode_timesteps = 0

    print("Starting training")
    while total_timesteps < args.max_timesteps:
        print("timestep: {} | reward: {}".format(total_timesteps, reward))

        if done:
            if total_timesteps != 0:
                print("Total T: %d Episode Num: %d Episode T: %d Reward: %f" %
                      (total_timesteps, episode_num, episode_timesteps, episode_reward))
                policy.train(replay_buffer, episode_timesteps, args.batch_size,
                             args.discount, args.tau)

            # Evaluate episode
            if timesteps_since_eval >= args.eval_freq:
                timesteps_since_eval %= args.eval_freq
                evaluations.append(evaluate_policy(env, policy))
                print("rewards at time {}: {}".format(total_timesteps, evaluations[-1]))

                if args.save_models:
                    policy.save(filename='{}_{}'.format('ddpg', total_timesteps),
                                directory=args.model_dir)
                np.savez("./results/rewards.npz", evaluations)

            # Reset environment
            env_counter += 1
            obs = env.reset()
            done = False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Select action randomly or according to policy
        if total_timesteps < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = policy.predict(np.array(obs))
            if args.expl_noise != 0:
                action = (action + np.random.normal(
                    0, args.expl_noise,
                    size=env.action_space.shape[0])).clip(
                        env.action_space.low, env.action_space.high)

        # Perform action
        new_obs, reward, done, _ = env.step(action)

        if episode_timesteps >= args.env_timesteps:
            done = True

        done_bool = 0 if episode_timesteps + 1 == args.env_timesteps else float(done)
        episode_reward += reward

        # Store data in replay buffer
        replay_buffer.add(obs, new_obs, action, reward, done_bool)

        obs = new_obs

        episode_timesteps += 1
        total_timesteps += 1
        timesteps_since_eval += 1

    print("Training done, about to save..")
    policy.save(filename='ddpg', directory=args.model_dir)
    print("Finished saving..should return now!")
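
# --- Hypothetical entry point (not part of the original functions above) ---
# A minimal sketch of how _train/_enjoy could be wired up from the command
# line. The argument names come from the `args.*` attributes referenced in
# _train(); the default values and the --test flag are illustrative
# assumptions, not the project's official CLI.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", default=0, type=int)                  # random seed
    parser.add_argument("--start_timesteps", default=10000, type=int)   # steps of purely random actions before using the policy
    parser.add_argument("--eval_freq", default=5000, type=int)          # timesteps between evaluations
    parser.add_argument("--max_timesteps", default=500000, type=int)    # total training timesteps
    parser.add_argument("--expl_noise", default=0.1, type=float)        # std of Gaussian exploration noise
    parser.add_argument("--batch_size", default=32, type=int)           # minibatch size for DDPG updates
    parser.add_argument("--discount", default=0.99, type=float)         # discount factor
    parser.add_argument("--tau", default=0.005, type=float)             # target-network soft-update rate
    parser.add_argument("--replay_buffer_max_size", default=10000, type=int)  # replay buffer capacity
    parser.add_argument("--env_timesteps", default=500, type=int)       # max steps per episode before timeout
    parser.add_argument("--model_dir", default="reinforcement/pytorch/models/", type=str)
    parser.add_argument("--save_models", action="store_true")           # save intermediate checkpoints during training
    parser.add_argument("--test", action="store_true")                  # run a saved policy instead of training
    args = parser.parse_args()

    if args.test:
        _enjoy()
    else:
        _train(args)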