def test_taking_actions():
    """Does the environment correctly change the state when told to take an action with and without stochasticity?"""
    random.seed()
    env = GridworldContinuous(0.05, 0.01)
    start = env.initial_state()
    # After one 'up' action, most sampled next states should land within
    # two standard deviations of the expected displaced position.
    ratio = ratio_test(
        lambda state: np.linalg.norm(
            np.asarray([state[0] - start[0],
                        state[1] - (start[1] + env.move_mean)]), 2)
        < env.move_sd * 2,
        partial(env.next_state, start, Action.up), 10000)
    assert 0.7 < ratio
    # A random walk must reach the terminal state within the step budget.
    # Bound the loop itself so a broken environment fails the assertion
    # instead of hanging the test suite forever (the original looped
    # unboundedly and only asserted afterwards).
    max_steps = 20000
    steps = 0
    s = env.initial_state()
    while not env.is_terminal(s) and steps < max_steps:
        s = env.next_state(s, np.random.randint(4))
        steps += 1
    assert steps < max_steps
# Example #2
# 0
def main(argv):
    """Execute experiment.

    Parses command-line arguments, seeds the RNG, constructs the
    environment / policy / function-approximator combination selected by
    the arguments, then runs Experiment2 for the configured number of
    steps.
    """
    args = parse_args(argv)
    random.seed(args.seed)
    logging.basicConfig(level=log_level(args.log_level))
    # Explicit dispatch replaces the fragile `cond and a or b` chains,
    # which silently evaluated to False for an unrecognized name and
    # crashed later with an opaque AttributeError.
    if args.environment == 'gridworld':
        environment = Gridworld(args.gridworld_width, args.gridworld_height,
                                args.failure_rate)
    elif args.environment == 'gridworld_continuous':
        environment = GridworldContinuous(0.2, 0.01)
    elif args.environment == 'combo_lock':
        environment = CombinationLock(args.gridworld_height,
                                      args.gridworld_width, 4,
                                      args.failure_rate)
    else:
        raise ValueError('unknown environment: {!r}'.format(args.environment))
    if args.agent_policy == 'random':
        policy = RandomPolicy(environment.num_actions)
    else:
        raise ValueError('unknown agent policy: {!r}'.format(args.agent_policy))
    if args.environment in ('gridworld', 'combo_lock'):
        # Discrete environments use a tabular function approximator.
        fa = TabularFA(environment.num_states(), environment.num_actions)
    else:
        # Continuous environment: radial-basis-function approximator.
        fa = RBF(2, 5, environment.num_actions, beta=args.beta)
    agent = Agent(policy,
                  fa,
                  environment.num_actions,
                  args.alpha,
                  args.gamma,
                  args.eta,
                  args.zeta,
                  args.epsilon,
                  args.num_vi,
                  args.sim_samples,
                  args.sim_steps,
                  retain_theta=args.retain_theta,
                  subgoals=environment.create_subgoals())
    # The initial policy is replaced by an option-aware random policy that
    # needs a reference back to the constructed agent.
    agent.policy = RandomOptionPolicy(agent, args.random_options)
    if args.agent_viz:
        # Visualization needs to know whether the state space is discrete.
        agent.create_visualization(
            args.environment in ('gridworld', 'combo_lock'), environment)
    e = Experiment2(agent, environment, args.plan_interval, args.num_steps,
                    args.viz_steps)
    e.run()
# Example #3
# 0
def test_taking_actions():
    """Does the environment correctly change the state when told to take an action with and without stochasticity?"""
    random.seed()
    env = GridworldContinuous(0.05, 0.01)
    start = env.initial_state()
    # After one 'up' action, most sampled next states should land within
    # two standard deviations of the expected displaced position.
    ratio = ratio_test(
        lambda state: np.linalg.norm(
            np.asarray(
                [state[0] - start[0], state[1] -
                 (start[1] + env.move_mean)]), 2) < env.move_sd * 2,
        partial(env.next_state, start, Action.up), 10000)
    assert 0.7 < ratio
    # A random walk must reach the terminal state within the step budget.
    # Bound the loop itself so a broken environment fails the assertion
    # instead of hanging the test suite forever (the original looped
    # unboundedly and only asserted afterwards).
    max_steps = 20000
    steps = 0
    s = env.initial_state()
    while not env.is_terminal(s) and steps < max_steps:
        s = env.next_state(s, np.random.randint(4))
        steps += 1
    assert steps < max_steps
# Example #4
# 0
def test_termination():
    """Does the environment terminate in the correct state?"""
    env = GridworldContinuous(0.05, 0.01)
    # The start corner is not terminal; a position near (1, 1) is.
    at_origin = env.is_terminal(GridPosition(0, 0))
    near_goal = env.is_terminal(GridPosition(0.97, 0.98))
    assert not at_origin
    assert near_goal
def test_termination():
    """Does the environment terminate in the correct state?"""
    world = GridworldContinuous(0.05, 0.01)
    # A position close to (1, 1) terminates; the origin does not.
    assert world.is_terminal(GridPosition(0.97, 0.98))
    assert not world.is_terminal(GridPosition(0, 0))