Example #1
def main():
    # create the environment
    # env = gym.make('FrozenLake-v0')
    # uncomment next line to try the deterministic version

    # env = gym.make('Deterministic-4x4-FrozenLake-v0')
    env = gym.make('Deterministic-8x8-FrozenLake-v0')
    # env = gym.make('Stochastic-4x4-FrozenLake-v0')
    # env = gym.make('Stochastic-8x8-FrozenLake-v0')
    # env = gym.make('Deterministic-4x4-neg-reward-FrozenLake-v0')

    print_env_info(env)
    print_model_info(env, 0, lake_env.DOWN)
    print_model_info(env, 1, lake_env.DOWN)
    print_model_info(env, 14, lake_env.RIGHT)

    input('Hit enter to run value iteration...')
    gamma = 0.9
    ### random policy
    # total_reward, num_steps = run_random_policy(env)

    start_time = time.time()

    # policy iteration
    # policy, value_func, steps = rl.policy_iteration(env, gamma)
    # print(steps)

    # value iteration
    value_func, iter_value = rl.value_iteration(env, gamma)
    policy = rl.value_function_to_policy(env, gamma, value_func)

    # results
    value_matrix = np.reshape(value_func, (-1, 8))
    print(value_matrix)
    cmap = mpl.colors.LinearSegmentedColormap.from_list(
        'my_colormap', ['blue', 'green', 'yellow'], 256)

    bounds = [
        value_matrix.min(),
        value_matrix.min(),
        value_matrix.max(),
        value_matrix.max()
    ]
    norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
    img = plt.imshow(value_matrix, interpolation='nearest', cmap=cmap)

    # make a color bar
    # plt.colorbar(img,cmap=cmap,
    #                 norm=norm,boundaries=bounds,ticks=[-5,0,5])

    plt.show()

    rl.print_policy(policy, lake_env.action_names)
    print("--- %s seconds ---" % (time.time() - start_time))

    total_reward, num_steps = run_policy(env, policy)

    print('Agent received total reward of: %f' % total_reward)
    print('Agent took %d steps' % num_steps)
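
A note on the rl.value_iteration call used above (and throughout these examples): it comes from the assignment code and is not shown on this page, and the snippets also assume that gym, numpy, matplotlib, time, and the course-provided lake_env and rl modules are imported at the top of the original file. As a point of reference, here is a minimal sketch of synchronous value iteration, assuming the FrozenLake model is exposed as env.P[s][a] = [(prob, next_state, reward, is_terminal), ...] with env.nS states and env.nA actions (an assumption, not the assignment's actual API):

import numpy as np

def value_iteration_sketch(env, gamma, max_iterations=1000, tol=1e-3):
    """Synchronous value iteration: repeatedly apply the Bellman optimality backup."""
    value_func = np.zeros(env.nS)
    for iteration in range(1, max_iterations + 1):
        new_value_func = np.zeros(env.nS)
        for s in range(env.nS):
            # Q(s, a) = sum over transitions of prob * (reward + gamma * V(next_state))
            q_values = [
                sum(prob * (reward + gamma * value_func[next_state] * (not is_terminal))
                    for prob, next_state, reward, is_terminal in env.P[s][a])
                for a in range(env.nA)
            ]
            new_value_func[s] = max(q_values)
        converged = np.max(np.abs(new_value_func - value_func)) < tol
        value_func = new_value_func
        if converged:
            break
    return value_func, iteration

The (not is_terminal) factor zeroes the bootstrap term for absorbing states, which is how the goal and hole tiles of FrozenLake are commonly handled.
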
Example #2
def run_value_iteration(env):
    """Run a random policy for the given environment.

    Logs the total reward and the number of steps until the terminal
    state was reached.

    Parameters
    ----------
    env: gym.envs.Environment
      Instance of an OpenAI gym.

    Returns
    -------
    (float, int)
      First number is the total undiscounted reward received. The
      second number is the total number of actions taken before the
      episode finished.
    """
    initial_state = env.reset()
    env.render()
    # time.sleep(1)  # just pauses so you can see the output

    total_reward = 0
    num_steps = 0
    gamma = 0.9
    tol = 1e-3
    max_iterations = 1000
    state = initial_state

    optimal_value_function, iterations = value_iteration(
        env, gamma, max_iterations, tol)
    policy = value_function_to_policy(env, gamma, optimal_value_function)

    while True:
        action_cur = policy[state]
        print(" ")
        print("step %d" % num_steps)
        print("action is %s" % action_names[action_cur])
        nextstate, reward, is_terminal, debug_info = env.step(action_cur)
        print(debug_info)
        state = nextstate
        env.render()
        print("move to state %d" % nextstate)

        total_reward += reward
        num_steps += 1

        if is_terminal:
            break

        # time.sleep(1)

    return total_reward, num_steps
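
value_function_to_policy is likewise assignment code that is not shown here; a possible greedy extraction under the same assumed env.P / env.nS / env.nA interface:

import numpy as np

def value_function_to_policy_sketch(env, gamma, value_function):
    """Greedy policy: for each state, pick the action with the best one-step lookahead value."""
    policy = np.zeros(env.nS, dtype=int)
    for s in range(env.nS):
        q_values = np.zeros(env.nA)
        for a in range(env.nA):
            for prob, next_state, reward, is_terminal in env.P[s][a]:
                q_values[a] += prob * (reward + gamma * value_function[next_state] * (not is_terminal))
        policy[s] = int(np.argmax(q_values))
    return policy
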
Example #3
def Part2():
    print("Problem 2, Part II")

    action_names = {
        lake_env.LEFT: 'L',
        lake_env.RIGHT: 'R',
        lake_env.DOWN: 'D',
        lake_env.UP: 'U'
    }
    gamma = 0.9
    env = gym.make('Stochastic-4x4-FrozenLake-v0')

    value_func, iteration = rl.value_iteration_sync(env, gamma)
    print(value_func.tolist())
    policy = rl.value_function_to_policy(env, gamma, value_func)
    rl.print_policy(policy, action_names)
    avg_reward = 0
    for episode in range(100):
        print(episode)
        total_reward, num_steps = run_policy(env, policy, gamma)
        avg_reward += total_reward
        print("reward: ", avg_reward)
    print("avg_reward: ", avg_reward / 100)

    env = gym.make('Stochastic-8x8-FrozenLake-v0')

    value_func, iteration = rl.value_iteration_sync(env, gamma)
    print(value_func.tolist())
    policy = rl.value_function_to_policy(env, gamma, value_func)
    rl.print_policy(policy, action_names)
    avg_reward = 0
    for episode in range(100):
        print(episode)
        total_reward, num_steps = run_policy(env, policy, gamma)
        avg_reward += total_reward
        print("reward: ", avg_reward)
    print("avg_reward: ", avg_reward / 100)
Example #4
def main():
    # create the environment
    # env = gym.make('FrozenLake-v0')
    # uncomment next line to try the deterministic version
    gamma = 0.9
    #env = gym.make('Deterministic-4x4-FrozenLake-v0')
    #env = gym.make('Deterministic-8x8-FrozenLake-v0')
    #env = gym.make('Stochastic-4x4-FrozenLake-v0')
    #env = gym.make('Stochastic-8x8-FrozenLake-v0')
    env = gym.make('Deterministic-4x4-neg-reward-FrozenLake-v0')
    action_names = lake_env.action_names
    policy = generate_random_policy(env)
    #print_policy(policy, action_names)
    env.render()

    # input('Hit enter to run policy iteration...')
    # start = time.time()
    # policy, value_func, num_policy_imp, num_value_iters = policy_iteration(env, gamma)
    # end   = time.time()
    # print("Execute time", end-start)
    # print_policy(policy, action_names)
    # print_values(value_func)
    # print("The number of policy improvements: %d"%(num_policy_imp))
    # print("The number of value iterations: %d" % (num_value_iters))
    # #print(run_policy(env, gamma, policy))
    # #plot_values(value_func)

    input('Hit enter to run value iteration...')
    start = time.time()
    values, num_value_iters = value_iteration(env, gamma)
    policy = value_function_to_policy(env, gamma, values)
    end = time.time()
    print("Execute time", end - start)
    print_policy(policy, action_names)
    print_values(values)
    plot_values(values)
    print("The number of value iterations: %d" % (num_value_iters))
Example #5
def Part1():
    print("Problem 2, Part I")

    action_names = {
        lake_env.LEFT: 'L',
        lake_env.RIGHT: 'R',
        lake_env.DOWN: 'D',
        lake_env.UP: 'U'
    }
    gamma = 0.9

    env = gym.make('Deterministic-8x8-FrozenLake-v0')

    time_before = int(round(time.time() * 1000))
    policy, value_func, policy_improvement_iteration, value_iteration = rl.policy_iteration_sync(
        env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("policy_improvement_iteration: ", policy_improvement_iteration)
    print("value iterations: ", value_iteration)
    print(value_func.tolist())
    rl.print_policy(policy, action_names)
    total_reward, num_steps = run_policy(env, policy, gamma)
    print("total_reward: ", total_reward)

    time_before = int(round(time.time() * 1000))
    value_func, iteration = rl.value_iteration_sync(env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print(value_func.tolist())
    print("value iteration: ", iteration)
    policy = rl.value_function_to_policy(env, gamma, value_func)
    rl.print_policy(policy, action_names)
    total_reward, num_steps = run_policy(env, policy, gamma)
    print("total_reward: ", total_reward)

    env = gym.make('Stochastic-8x8-FrozenLake-v0')

    time_before = int(round(time.time() * 1000))
    policy, value_func, policy_improvement_iteration, value_iteration = rl.policy_iteration_sync(
        env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("policy_improvement_iteration: ", policy_improvement_iteration)
    print("value iterations: ", value_iteration)
    print(value_func.tolist())
    rl.print_policy(policy, action_names)
    total_reward, num_steps = run_policy(env, policy, gamma)
    print("total_reward: ", total_reward)

    time_before = int(round(time.time() * 1000))
    value_func, iteration = rl.value_iteration_sync(env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print(value_func.tolist())
    print("value iteration: ", iteration)
    policy = rl.value_function_to_policy(env, gamma, value_func)
    rl.print_policy(policy, action_names)
    total_reward, num_steps = run_policy(env, policy, gamma)
    print("total_reward: ", total_reward)

    # Problem 2, Part I, h)
    env = gym.make('Deterministic-8x8-FrozenLake-v0')

    time_before = int(round(time.time() * 1000))
    policy, value_func, policy_improvement_iteration, value_iteration = rl.policy_iteration_async_ordered(
        env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("policy_improvement_steps: ", policy_improvement_iteration)
    print("value_iteration: ", value_iteration)
    print(value_func.tolist())
    rl.print_policy(policy, action_names)
    run_policy(env, policy, gamma)

    time_before = int(round(time.time() * 1000))
    policy, value_func, policy_improvement_iteration, value_iteration = rl.policy_iteration_async_randperm(
        env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("policy_improvement_steps: ", policy_improvement_iteration)
    print("value_iteration: ", value_iteration)

    # Problem 2, Part I, i)
    env = gym.make('Deterministic-8x8-FrozenLake-v0')

    time_before = int(round(time.time() * 1000))
    value_func, iteration = rl.value_iteration_async_ordered(env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("value iteration: ", iteration)

    time_before = int(round(time.time() * 1000))
    value_func, iteration = rl.value_iteration_async_randperm(env, gamma)
    time_after = int(round(time.time() * 1000))
    time_execute = time_after - time_before
    print("execution time (ms): ", time_execute)
    print("value iteration: ", iteration)
Example #6
def main():
    # create the environment
    env = gym.make('FrozenLake-v0')
    # uncomment next line to try the deterministic version
    env = gym.make('Deterministic-4x4-neg-reward-FrozenLake-v0')
    # env = gym.make('Deterministic-4x4-FrozenLake-v0')
    # env = gym.make('Deterministic-8x8-FrozenLake-v0')
    # env = gym.make('Stochastic-4x4-FrozenLake-v0')
    # env = gym.make('Stochastic-8x8-FrozenLake-v0')

    # print_env_info(env)
    # print_model_info(env, 0, lake_env.DOWN)
    # print_model_info(env, 1, lake_env.DOWN)
    # print_model_info(env, 14, lake_env.RIGHT)

    # initialization
    gamma = 0.9
    tol = 1e-3
    max_iterations = 1000
    grid_width = 4
    env.render()

    # UNIT TEST

    ##############################################################################################
    # test random policy
    # input('Hit enter to run a random policy...')
    # total_reward, num_steps = run_random_policy(env)
    # print('Agent received total reward of: %f' % total_reward)
    # print('Agent took %d steps' % num_steps)

    ##############################################################################################
    # test policy evaluation (deterministic)
    # policy = np.zeros(env.nS, dtype=int)
    # for i in range(env.nS):
    #     policy[i] = random.randint(0, 3)
    # print("initial policy is:")
    # print_policy(policy, action_names, grid_width)
    # value_function, num_iterations = evaluate_policy(env, gamma, policy, max_iterations, tol)
    # print("number of iteration needed is %d" % num_iterations)
    # print("value function based on current policy is:")
    # print_value_function(value_function, grid_width)     # 4x4

    ##############################################################################################
    # test policy improvement
    # policy = np.zeros(env.nS, dtype=int)
    # print("initial policy is:")
    # print_policy(policy, action_names, grid_width)
    # value_function = np.array([0,     0,    0,     0,
    #                            0,     0,    0,     0,
    #                            0.729, 0.81, 0.729, 0,
    #                            0.0,   0.9,  1.0,   0])
    # print("value function is:")
    # print_value_function(value_function, grid_width)
    # improved, new_policy = improve_policy(env, gamma, value_function, policy)
    # print("new policy is:")
    # print_policy(new_policy, action_names, grid_width)
    # print("Is policy improved ? %s" % improved)

    ##############################################################################################
    # test policy iteration
    # start_time = time.time()
    # policy, value_func, iteration_improvement, iteration_evaluation = policy_iteration(env, gamma, max_iterations, tol)
    # end_time = time.time()
    # print("final value function is:")
    # print_value_function(value_func, grid_width)
    # print("optimal policy is:")
    # print_policy(policy, action_names, grid_width)
    # print("execution time is %s second" % (end_time - start_time))
    # print("total number of policy improvement is: %d" % iteration_improvement)
    # print("total number of policy evaluation iteration is: %d" % iteration_evaluation)

    ##############################################################################################
    # test value iteration
    start_time = time.time()
    optimal_value_function, iteration = value_iteration(
        env, gamma, max_iterations, tol)
    end_time = time.time()
    policy = value_function_to_policy(env, gamma, optimal_value_function)
    print("final value function is:")
    print_value_function(optimal_value_function, grid_width)
    print("optimal policy is:")
    print_policy(policy, action_names, grid_width)
    print("execution time is %s second" % (end_time - start_time))
    print("total number of iteration is: %d" % iteration)
Example #7
def main():
    denv4 = gym.make('Deterministic-4x4-FrozenLake-v0')
    denv8 = gym.make('Deterministic-8x8-FrozenLake-v0')
    senv4 = gym.make('Stochastic-4x4-FrozenLake-v0')
    senv8 = gym.make('Stochastic-8x8-FrozenLake-v0')
    gamma = 0.9
    policy_iter_fstr = \
'''
%s: %s
  time_elapsed: %s
  pol_iter: %s
  total_val_iter: %s
  start_val: %s
'''
    value_iter_fstr = \
'''
%s: %s
  time_elapsed: %s
  iter: %s
  start_val: %s
'''

    denv4.render()
    denv8.render()

    t_start = time.time()
    (opolicy, value_func, pol_iter,
     total_val_iter) = rl.policy_iteration_sync(denv4, gamma)
    print(policy_iter_fstr %
          ('policy_iteration_sync', denv4, time.time() - t_start, pol_iter,
           total_val_iter, value_func[3]))
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,4,0.1,4))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (opolicy, value_func, pol_iter,
     total_val_iter) = rl.policy_iteration_sync(denv8, gamma)
    print(policy_iter_fstr %
          ('policy_iteration_sync', denv8, time.time() - t_start, pol_iter,
           total_val_iter, value_func[3]))
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_sync(denv4, gamma)
    print(value_iter_fstr % ('value_iteration_sync', denv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv4, gamma, value_func)
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,4,0.1,4))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_sync(denv8, gamma)
    print(value_iter_fstr % ('value_iteration_sync', denv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv8, gamma, value_func)
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (opolicy, value_func, pol_iter,
     total_val_iter) = rl.policy_iteration_async_ordered(denv8, gamma)
    print(policy_iter_fstr %
          ('policy_iteration_async_ordered', denv8, time.time() - t_start,
           pol_iter, total_val_iter, value_func[3]))
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (opolicy, value_func, pol_iter,
     total_val_iter) = rl.policy_iteration_async_randperm(denv8, gamma)
    print(policy_iter_fstr %
          ('policy_iteration_async_randperm', denv8, time.time() - t_start,
           pol_iter, total_val_iter, value_func[3]))
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_ordered(denv4, gamma)
    print(value_iter_fstr % ('value_iteration_async_ordered', denv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv4, gamma, value_func)
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,4,0.1,4))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_ordered(denv8, gamma)
    print(value_iter_fstr % ('value_iteration_async_ordered', denv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv8, gamma, value_func)
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_randperm(denv4, gamma)
    print(value_iter_fstr % ('value_iteration_async_randperm', denv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv4, gamma, value_func)
    print('rew: %s, steps: %s' %
          runPolicy(denv4, gamma, value_func, opolicy, 3))
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,4,0.1,4))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_randperm(denv8, gamma)
    print(value_iter_fstr % ('value_iteration_async_randperm', denv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv8, gamma, value_func)
    print('rew: %s, steps: %s' %
          runPolicy(denv8, gamma, value_func, opolicy, 3))
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_sync(senv4, gamma)
    print(value_iter_fstr % ('value_iteration_sync', senv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(senv4, gamma, value_func)
    avg_irew = 0
    avg_steps = 0
    for _ in range(100):
        rew, steps = runPolicy(senv4, gamma, value_func, opolicy, 3)
        avg_irew += rew
        avg_steps += steps
    avg_irew /= 100
    avg_steps /= 100
    print('avg_rew: %s, avg_steps: %s' % (avg_irew, avg_steps))
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,4,0.1,4))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_sync(senv8, gamma)
    print(value_iter_fstr % ('value_iteration_sync', senv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(senv8, gamma, value_func)
    avg_irew = 0
    avg_steps = 0
    for _ in range(100):
        rew, steps = runPolicy(senv8, gamma, value_func, opolicy, 3)
        avg_irew += rew
        avg_steps += steps
    avg_irew /= 100
    avg_steps /= 100
    print('avg_rew: %s, avg_steps: %s' % (avg_irew, avg_steps))
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
    plt.imshow(value_func,                  \
               interpolation='nearest',     \
               cmap=plt.cm.ocean,           \
               extent=(0.1,8,0.1,8))
    plt.colorbar()
    plt.show()

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_custom(denv4, gamma)
    print(value_iter_fstr % ('value_iteration_async_custom', denv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv4, gamma, value_func)
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_custom(denv8, gamma)
    print(value_iter_fstr % ('value_iteration_async_custom', denv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(denv8, gamma, value_func)
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_custom(senv4, gamma)
    print(value_iter_fstr % ('value_iteration_async_custom', senv4,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(senv4, gamma, value_func)
    opolicy = opolicy.reshape((4, 4))
    value_func = value_func.reshape((4, 4))
    printPolicy(opolicy)

    t_start = time.time()
    (value_func, iter) = rl.value_iteration_async_custom(senv8, gamma)
    print(value_iter_fstr % ('value_iteration_async_custom', senv8,
                             time.time() - t_start, iter, value_func[3]))
    opolicy = rl.value_function_to_policy(senv8, gamma, value_func)
    opolicy = opolicy.reshape((8, 8))
    value_func = value_func.reshape((8, 8))
    printPolicy(opolicy)
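
The async value iteration variants used in this example differ from the synchronous version mainly in that the value function is updated in place, one state at a time, with either an ordered or a randomly permuted sweep. A combined sketch of the ordered and random-permutation updates (again an assumption about the rl module, not its actual code):

import numpy as np

def value_iteration_async_sketch(env, gamma, max_iterations=1000, tol=1e-3, randperm=False):
    """Gauss-Seidel style value iteration: update V in place, one state at a time."""
    value_func = np.zeros(env.nS)
    for iteration in range(1, max_iterations + 1):
        delta = 0.0
        states = np.random.permutation(env.nS) if randperm else range(env.nS)
        for s in states:
            best = max(
                sum(prob * (r + gamma * value_func[ns] * (not done))
                    for prob, ns, r, done in env.P[s][a])
                for a in range(env.nA))
            delta = max(delta, abs(best - value_func[s]))
            value_func[s] = best  # in-place update: later states in the sweep see the new value
        if delta < tol:
            break
    return value_func, iteration

Because later states in a sweep already see updated values, these asynchronous sweeps typically need fewer full passes than the synchronous backup, which is the comparison this example is set up to measure.
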