Example #1
def run_policy_iteration(env_name, gamma=1.0):
    env = gym.make(env_name)
    env.seed(99)
    np.random.seed(99)

    start = timeit.default_timer()

    optimal_policy = policy_iteration(env, gamma)

    stop = timeit.default_timer()
    total_time = stop - start

    scores = evaluate_policy(env, optimal_policy)

    return scores, total_time
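
The `evaluate_policy` helper used above is not part of the snippet. For a tabular environment such as FrozenLake (the usual setting for `policy_iteration`), a minimal sketch could look like the following; the episode count, discounting, and mean return value are assumptions, not the original helper.

def evaluate_policy(env, policy, gamma=1.0, n_episodes=100):
    # Roll out the state-indexed deterministic policy and average the
    # discounted return over n_episodes. Illustrative sketch only.
    scores = []
    for _ in range(n_episodes):
        obs = env.reset()
        total_reward, step_idx, done = 0.0, 0, False
        while not done:
            obs, reward, done, _ = env.step(int(policy[obs]))
            total_reward += (gamma ** step_idx) * reward
            step_idx += 1
        scores.append(total_reward)
    return np.mean(scores)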
Example #2
env = launch_env()

# Set seeds
seed(args.seed)

state_dim = env.observation_space.shape
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])

# Initialize policy
policy = DDPG(state_dim, action_dim, max_action, net_type="cnn")

replay_buffer = ReplayBuffer(args.replay_buffer_max_size)

# Evaluate untrained policy
evaluations = [evaluate_policy(env, policy)]

total_timesteps = 0
timesteps_since_eval = 0
episode_num = 0
done = True
episode_reward = None
env_counter = 0

while total_timesteps < args.max_timesteps:

    if done:
        print(f"Done @ {total_timesteps}")

        if total_timesteps != 0:
            print("Replay buffer length is ", len(replay_buffer.storage))
Example #3
def _train(args):   
    if not os.path.exists("./results"):
        os.makedirs("./results")
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
        
    # Launch the env with our helper function
    env = launch_env()
    print("Initialized environment")

    # Wrappers
    env = ResizeWrapper(env)
    env = NormalizeWrapper(env)
    env = ImgWrapper(env)  # reshape images from 160x120x3 to 3x160x120
    env = ActionWrapper(env)
    env = DtRewardWrapper(env)
    print("Initialized Wrappers")
    
    device = "cpu" # torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
    # Set seeds
    seed(args.seed)

    state_dim = env.observation_space.shape
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    # Initialize policy
    policy = DDPG(state_dim, action_dim, max_action, net_type="cnn")
    replay_buffer = utils.ReplayBuffer(args.replay_buffer_max_size)
    print("Initialized DDPG")
    
    # Evaluate untrained policy
    evaluations = [evaluate_policy(env, policy)]
   
    total_timesteps = 0
    timesteps_since_eval = 0
    episode_num = 0
    done = True
    episode_reward = None
    env_counter = 0
    reward = 0
    print("Starting training")
    while total_timesteps < args.max_timesteps:
        
        print("timestep: {} | reward: {}".format(total_timesteps, reward))
            
        if done:
            if total_timesteps != 0:
                print(("Total T: %d Episode Num: %d Episode T: %d Reward: %f") % (
                    total_timesteps, episode_num, episode_timesteps, episode_reward))
                policy.train(replay_buffer, episode_timesteps, args.batch_size, args.discount, args.tau)

                # Evaluate episode
                if timesteps_since_eval >= args.eval_freq:
                    timesteps_since_eval %= args.eval_freq
                    evaluations.append(evaluate_policy(env, policy))
                    print("rewards at time {}: {}".format(total_timesteps, evaluations[-1]))

                    if args.save_models:
                        policy.save(file_name, directory=args.model_dir)
                    np.savez("./results/{}.npz".format(file_name), evaluations)

            # Reset environment
            env_counter += 1
            obs = env.reset()
            done = False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Select action randomly or according to policy
        if total_timesteps < args.start_timesteps:
            action = env.action_space.sample()
        else:
            action = policy.predict(np.array(obs))
            if args.expl_noise != 0:
                action = (action + np.random.normal(
                    0,
                    args.expl_noise,
                    size=env.action_space.shape[0])
                          ).clip(env.action_space.low, env.action_space.high)

        # Perform action
        new_obs, reward, done, _ = env.step(action)

        if episode_timesteps >= args.env_timesteps:
            done = True

        done_bool = 0 if episode_timesteps + 1 == args.env_timesteps else float(done)
        episode_reward += reward

        # Store data in replay buffer
        replay_buffer.add(obs, new_obs, action, reward, done_bool)

        obs = new_obs

        episode_timesteps += 1
        total_timesteps += 1
        timesteps_since_eval += 1
    
    print("Training done, about to save..")
    policy.save(filename='ddpg', directory=args.model_dir)
    print("Finished saving..should return now!")
Example #4
# Launch environment
env = launch_env()
# Set seeds
seed(args.seed)

state_dim = env.get_features().shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])

# Initialize policy
policy = DDPG(state_dim, action_dim, max_action, net_type="dense")
replay_buffer = ReplayBuffer(args.replay_buffer_max_size)

# Evaluate untrained policy
rew_eval, time_eval = evaluate_policy(env, policy)

total_timesteps = 0
timesteps_since_eval = 0
episode_num = 0
done = True
episode_reward = None
env_counter = 0

# Create our variables for logging;
# while evaluating, the exploration value is 0
evaluations_test = []
evaluations_eval = []

evaluations_test.append([episode_num, total_timesteps, rew_eval, 0])
# Create OrnsteinUhlenbeckActionNoise instance
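
The snippet stops right where the Ornstein-Uhlenbeck noise would be created. A standard implementation of that process (a generic sketch; the class actually used in this example is not shown) is:

import numpy as np

class OrnsteinUhlenbeckActionNoise:
    # Temporally correlated exploration noise, commonly paired with DDPG.
    def __init__(self, mu, sigma=0.2, theta=0.15, dt=1e-2, x0=None):
        self.mu = np.asarray(mu, dtype=np.float64)
        self.sigma = sigma
        self.theta = theta
        self.dt = dt
        self.x0 = x0
        self.reset()

    def __call__(self):
        # dx = theta * (mu - x) * dt + sigma * sqrt(dt) * N(0, 1)
        x = (self.x_prev
             + self.theta * (self.mu - self.x_prev) * self.dt
             + self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape))
        self.x_prev = x
        return x

    def reset(self):
        self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)

It would typically be instantiated as `OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))` and added to the policy's action before clipping to the action bounds.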
Example #5
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    # Initialize policy
    if args.policy_name == "TD3":
        policy = TD3.TD3(state_dim, action_dim, max_action)
    elif args.policy_name == "OurDDPG":
        policy = OurDDPG.DDPG(state_dim, action_dim, max_action)
    elif args.policy_name == "DDPG":
        policy = DDPG.DDPG(state_dim, action_dim, max_action)

    replay_buffer = utils.ReplayBuffer()

    # Evaluate untrained policy
    evaluations = [evaluate_policy(policy, env)]
    writer.add_scalar('episode_count/eval_performance', evaluations[0], 0)

    timesteps_since_eval = 0
    episode_num = 0
    done = True

    timestep_range = trange(int(args.max_timesteps))

    for total_timesteps in timestep_range:

        if done:
            episode_num += 1

            if total_timesteps != 0:
                timestep_range.set_postfix({
Example #6
	alpha=0.6
	memory_size=10000
	replay_buffer=proportional_replay.Experience(memory_size=memory_size, batch_size=args.batch_size, alpha=alpha)
	"""

    capacity = 100000
    print("---------Initialize Replay Buffer-----------")
    if args.prioritized:
        replay_buffer = per.PriorExpReplay(capacity)
    else:
        replay_buffer = utils.ReplayBuffer()

    # Evaluate untrained policy
    print("------Evaluating untrained policy---------")
    evaluations = []
    evaluations.append([utils.evaluate_policy(policy, args.env_name), 0])

    #print("------Running time-------")
    """
	while total_timesteps < args.max_timesteps:
		print("Timestep: ", total_timesteps+1)
		
		if done: 

			if total_timesteps != 0: 
				print(("Total T: %d Episode Num: %d Episode T: %d Reward: %f") % (total_timesteps, episode_num, episode_timesteps, episode_reward))
				if args.policy_name == "TD3":
					policy.train(replay_buffer, episode_timesteps, args.batch_size, args.discount, args.tau, args.policy_noise, args.noise_clip, args.policy_freq)
				else: 
					policy.train(replay_buffer, episode_timesteps, args.batch_size, args.discount, args.tau)
			
Example #7
def TD3_trainer_sim(env,
                    sim,
                    test_unit,
                    device,
                    exploration,
                    threshold,
                    filename,
                    save_dir,
                    eval_frequency,
                    observations,
                    init_policy=None,
                    population=False,
                    env_name=None):
    print("population: ", population)

    #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # adjustable parameters
    policy_noise = 0.2
    noise_clip = 0.5
    policy_freq = 2
    discount = 0.99
    tau = 0.005
    max_timesteps = 1e6
    start_timesteps = 25e3
    batch_size = 256
    expl_noise = 0.1

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])
    reward_fn = env.torch_reward_fn()

    # Set seeds
    env.seed(0)
    torch.manual_seed(0)
    np.random.seed(0)

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
        "discount": discount,
        "tau": tau,
    }

    # Target policy smoothing is scaled wrt the action scale
    kwargs["policy_noise"] = policy_noise * max_action
    kwargs["noise_clip"] = noise_clip * max_action
    kwargs["policy_freq"] = policy_freq
    policy = TD3(env=env, device=device, **kwargs)

    # if load_model != "":
    #     policy_file = file_name if args.load_model == "default" else args.load_model
    #     policy.load(f"./models/{policy_file}")

    replay_buffer = ReplayBuffer(state_dim, action_dim)

    # Evaluate untrained policy
    evaluations = [evaluate_policy(policy, env)]

    state, done = env.reset(), False
    episode_reward = 0
    episode_timesteps = 0
    episode_num = 0
    best_avg = -2000
    episode_rewards = []

    for t in range(int(max_timesteps)):

        episode_timesteps += 1

        # Select action randomly or according to policy
        if t < start_timesteps:
            action = env.action_space.sample()
        else:
            action = (policy.select_action(np.array(state)) + np.random.normal(
                0, max_action * expl_noise, size=action_dim)).clip(
                    -max_action, max_action)

        # pre process
        state, action = torch.tensor(state).to(device).float(), torch.tensor(
            action).to(device).float()
        sim.reset(state)
        # print(action)
        # Perform action
        next_state, _, _, _ = sim.step(action, test_unit)

        # post process predictions
        next_state = next_state.detach()
        reward = reward_fn(state, action, next_state)
        done_bool = 1.0 if episode_timesteps < env.max_timesteps else 0

        # Store data in replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)

        state = next_state
        episode_reward += reward
        # Train agent after collecting sufficient data
        if t >= start_timesteps:
            policy.train(replay_buffer, batch_size)

        if done:
            episode_rewards.append(episode_reward)
            avg_reward = np.mean(episode_rewards[-100:])
            if best_avg < avg_reward:
                best_avg = avg_reward
                print("Saving best model.... \n")
                policy.save(filename, save_dir)

            # +1 to account for 0 indexing. +0 on ep_timesteps since it will increment +1 even if done=True
            print(
                f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}"
            )
            # Reset environment
            state, done = env.reset(), False
            episode_reward = 0
            episode_timesteps = 0
            episode_num += 1

        # Evaluate episode
        if (t + 1) % eval_frequency == 0:
            evaluations.append(evaluate_policy(policy, env))
            np.save(f"eval/{filename}", evaluations)

    print("------------------------------------------------")
    print("FINAL AGENT EVAL: ", evaluate_policy(policy, env))
    print("------------------------------------------------")
    return policy
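
Examples #5 and #7 call `evaluate_policy(policy, env)` with the arguments in the opposite order from Examples #1-#4, and with agents that expose `select_action`. A sketch consistent with that usage (the episode count and averaging are assumptions):

def evaluate_policy(policy, env, eval_episodes=10):
    # Average undiscounted return of the deterministic policy.
    avg_reward = 0.0
    for _ in range(eval_episodes):
        state, done = env.reset(), False
        while not done:
            action = policy.select_action(np.array(state))
            state, reward, done, _ = env.step(action)
            avg_reward += reward
    return avg_reward / eval_episodes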