Example #1
def main():
    new_map = ["SFFF", "FHFH", "FFFH", "HFFG"]
    env = FrozenLakeEnv(desc=new_map, is_slippery=IS_SLIPPERY)
    env = env.unwrapped
    succeed_episode = 0

    for i_episode in range(1000000):

        if use_random_map and i_episode % 10 == 0:
            env.close()
            new_map = random_map(HOLE_NUM)
            env = FrozenLakeEnv(desc=new_map, is_slippery=IS_SLIPPERY)
            env = env.unwrapped

        pos = env.reset()
        state = encode_state(new_map, pos)

        ep_r = 0

        while True:
            a = select_action(state)

            pos_next, r, done, info = env.step(a)
            ep_r += r
            state = encode_state(new_map, pos_next)  # keep the policy input in sync with the new position

            if args.render:
                env.render()
            model.rewards.append(r)

            if done:
                break

        finish_episode()

        episode_durations.append(ep_r)

        if ep_r > 0:
            # EPSILON = 1 - 1. / ((i_episode / 500) + 10)
            succeed_episode += 1

        if i_episode % 1000 == 1:
            print('EP: {:d} success rate {:.4f}'.format(i_episode,
                                                         succeed_episode / 1000))
            succeed_episode = 0

        if i_episode % 5000 == 1:
            plot_durations()
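
Example #1 relies on several helpers that are not shown here (encode_state, random_map, select_action, finish_episode, model, plot_durations). As a rough orientation only, below is a minimal sketch of what encode_state and random_map could look like; the signatures are inferred from the calls above and the bodies are assumptions, not the original implementation.

import numpy as np

def encode_state(desc, pos):
    # One-hot encoding of the agent's position on the flattened map
    # (assumed; the original encoding is not shown).
    n = sum(len(row) for row in desc)
    state = np.zeros(n, dtype=np.float32)
    state[pos] = 1.0
    return state

def random_map(hole_num, size=4):
    # Random size x size FrozenLake layout with hole_num holes, start in the
    # top-left corner and goal in the bottom-right (assumed).
    cells = ['F'] * (size * size)
    holes = np.random.choice(np.arange(1, size * size - 1), hole_num, replace=False)
    for h in holes:
        cells[h] = 'H'
    cells[0], cells[-1] = 'S', 'G'
    return [''.join(cells[i * size:(i + 1) * size]) for i in range(size)]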
Example #2
    def test_expected(self):
        env = FrozenLakeEnv(is_slippery=False)
        policy = UserInputPolicy(env)

        s = env.reset()
        env.render()

        for i in [RIGHT, RIGHT, DOWN, DOWN, DOWN, RIGHT]:
            with MockInputFunction(return_value=i):
                a = policy(s)

            s, r, done, info = env.step(a)
            env.render()

            if done:
                break
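
MockInputFunction and UserInputPolicy belong to the surrounding test module and are not shown. A plausible stand-in for MockInputFunction, inferred purely from how it is used above, is a context manager that patches the built-in input(); this is an assumption, not the project's actual class.

from unittest import mock

class MockInputFunction:
    # Patches builtins.input so UserInputPolicy can be driven from a test
    # (behaviour assumed from the usage above).
    def __init__(self, return_value):
        self.return_value = return_value

    def __enter__(self):
        self._patcher = mock.patch('builtins.input',
                                   return_value=str(self.return_value))
        self._patcher.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._patcher.stop()
        return False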
Example #3
def update_Q_table(Q_table, state, action, reward, new_state, new_action,
                   is_done):
    '''SARSA update of Q_table for one transition; gamma and learning_rate
    are taken from the surrounding scope.'''

    if is_done:
        delta = (reward - Q_table[state, action])
    else:
        delta = (reward + gamma * Q_table[new_state, new_action] -
                 Q_table[state, action])

    Q_table[state, action] += learning_rate * delta


reward_list = []
for k in range(N_trial + N_trial_test):

    acc_reward = 0  # Init the accumulated reward
    observation = env.reset()  # Init the state
    action = policy(Q_table, observation, epsilon)  # Init the first action

    for t in range(trial_duration):
        if render: env.render()

        new_observation, reward, done, info = env.step(action)  # Take the action
        new_action = policy(Q_table, new_observation, epsilon)
        update_Q_table(Q_table=Q_table,
                       state=observation,
                       action=action,
                       reward=reward,
                       new_state=new_observation,
                       new_action=new_action,
                       is_done=done)
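
The loop above ends before the usual SARSA handover (observation = new_observation; action = new_action), and the epsilon-greedy policy it calls is not shown. A minimal policy consistent with the calls policy(Q_table, observation, epsilon) could look like the sketch below; it is an assumption, not the original function.

import numpy as np

def policy(Q_table, state, epsilon):
    # Epsilon-greedy action selection over the tabular Q-values (assumed).
    if np.random.rand() < epsilon:
        return np.random.randint(Q_table.shape[1])  # explore
    return int(np.argmax(Q_table[state, :]))        # exploit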
Example #4
max_steps = 99                # Max steps per episode
gamma = 0.95                  # Discounting rate

# Exploration parameters
epsilon = 1.0                 # Exploration rate
max_epsilon = 1.0             # Exploration probability at start
min_epsilon = 0.2            # Minimum exploration probability
decay_rate = 0.01             # Exponential decay rate for exploration prob

# List of rewards
rewards = []

# 2. Run episodes for life or until learning is stopped
for episode in range(total_episodes):
    # Reset the environment
    state = env.reset()
    step = 0
    done = False
    total_rewards = 0

    for step in range(max_steps):
        # 3. Choose an action a in the current world state (s)
        ## First we randomize a number
        exp_exp_tradeoff = random.uniform(0, 1)
        # print(exp_exp_tradeoff,epsilon)

        ## If this number is greater than epsilon --> exploitation (take the action with the biggest Q value for this state)
        if exp_exp_tradeoff > epsilon:
            action = np.argmax(qtable[state, :])
            # print("action",action)
Example #5
NUM_EPISODES = 500
# How often we print results
PRINT_EVERY_EPS = 100

environment = FrozenLakeEnv(is_slippery=False)

num_states = environment.observation_space.n
num_actions = environment.action_space.n

agent = QAgent(num_states, num_actions)

sum_reward = 0

for episode in range(NUM_EPISODES):
    done = False
    last_state = environment.reset()
    last_reward = None
    # Number of steps taken. A bit of a safeguard...
    num_steps = 0
    while not done:
        # Epsilon-greedy policy
        action = agent.get_action(last_state, environment)

        state, reward, done, info = environment.step(action)

        # A crude timeout: If we play too long without
        # completing the level, kill the game
        num_steps += 1
        if num_steps > 1000:
            print(
                "Episode timeout! Could not finish in 1000 steps. Check your actions!"
Example #6
epsilon = 0.5


#print(Q.shape)
def epsilon_policy(state, Q, epsilon):
    a = np.argmax(Q[state, :])
    if np.random.rand() < epsilon:
        a = np.random.randint(num_actions)
        #a = env.action_space.sample()
    return a


averageepisodelength = []
for i in range(num_episodes):
    episodelength = 0
    state = env.reset()
    totalreward = 0

    rand = np.random.randn(1, env.action_space.n)
    #action = random.randint(0,num_actions-1)
    done = False
    action = epsilon_policy(state, Q, epsilon)
    #print(state,action)
    while not done:
        newstate, reward, done, q = env.step(action)
        #print(newstate, reward, done , q)
        newaction = epsilon_policy(newstate, Q, epsilon)
        #newaction = np.argmax(Q[newstate, :] + np.random.randn(1, env.action_space.n) * (1. / (i + 1)) )
        #print("A:",newaction)
        Q[state, action] = Q[state, action] + alpha * (
            reward + gamma * Q[newstate, newaction] - Q[state, action])
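        # Assumed continuation (the snippet is cut off here): advance the
        # SARSA state/action pair and keep the per-episode statistics.
        totalreward += reward
        episodelength += 1
        state, action = newstate, newaction

    averageepisodelength.append(episodelength)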
Example #7

# define function approximators
func = LinearFunc(env, lr=0.01)
pi = km.SoftmaxPolicy(func, update_strategy='vanilla')
cache = km.caching.MonteCarloCache(env, gamma=0.99)


# static parameters
num_episodes = 250
num_steps = 30


# train
for ep in range(num_episodes):
    s = env.reset()
    cache.reset()

    for t in range(num_steps):
        a = pi(s)
        s_next, r, done, info = env.step(a)

        # small incentive to keep moving
        if np.array_equal(s_next, s):
            r = -0.1

        cache.add(s, a, r, done)

        if done:
            while cache:
                S, A, G = cache.pop()
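                # Assumed continuation (the example is cut off here): feed the
                # Monte Carlo returns back into the policy update. The exact
                # keras-gym method name may differ by version, so this call is
                # a guess rather than the original code.
                pi.batch_update(S, A, G)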