Code Example #1
File: viz_example.py Project: dhalper1/simple_rl-1
def main():
    # Setup MDP, Agents.
    mdp = FourRoomMDP(5, 5, goal_locs=[(5, 5)], gamma=0.99, step_cost=0.01)
    # mdp = make_grid_world_from_file("octogrid.txt", num_goals=12, randomize=False)
    ql_agent = QLearningAgent(mdp.get_actions(), epsilon=0.2, alpha=0.5) 
    rm_agent = RMaxAgent(mdp.get_actions())
    viz = parse_args()

    # Choose viz type.
    viz = "learning"

    if viz == "value":
        # Run experiment and make plot.
        mdp.visualize_value()
    elif viz == "policy":
        # Viz policy
        value_iter = ValueIteration(mdp)
        value_iter.run_vi()
        policy = value_iter.policy
        mdp.visualize_policy(policy)
    elif viz == "agent":
        # Solve problem and show agent interaction.
        print("\n", str(ql_agent), "interacting with", str(mdp))
        run_single_agent_on_mdp(ql_agent, mdp, episodes=500, steps=200)
        mdp.visualize_agent(ql_agent)
    elif viz == "learning":
        # Run experiment and make plot.
        mdp.visualize_learning(ql_agent)
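The snippets on this page are reproduced without their import lines. As a rough, assumed reconstruction (module paths follow simple_rl's usual layout and are not part of the original snippet; parse_args is presumably a local argument-parsing helper in the same example script), the visualization examples above and below would typically begin with something like:

# Assumed imports for the visualization examples (simple_rl layout).
from simple_rl.agents import QLearningAgent, RMaxAgent, RandomAgent
from simple_rl.tasks import GridWorldMDP, FourRoomMDP
from simple_rl.planning import ValueIteration
from simple_rl.run_experiments import run_single_agent_on_mdp, run_agents_on_mdp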
Code Example #2
class StochasticSAPolicy(object):
    def __init__(self, state_abstr, mdp):
        self.state_abstr = state_abstr
        self.mdp = mdp
        self.vi = ValueIteration(mdp)
        self.vi.run_vi()

    def policy(self, state):
        '''
        Args:
            state (simple_rl.State)

        Returns:
            (str): An action

        Summary:
            Chooses an action among the optimal actions in the cluster. That is, roughly:

                \pi(a \mid s_a) \sim Pr_{s_g \in s_a} (a = a^*(s_g))
        '''

        abstr_state = self.state_abstr.phi(state)
        ground_states = self.state_abstr.get_ground_states_in_abs_state(
            abstr_state)

        action_distr = defaultdict(float)
        for s in ground_states:
            a = self.vi.policy(s)
            action_distr[a] += 1.0 / len(ground_states)

        # Materialize keys/values as lists so the sampled index maps back to
        # an action (dict views are not indexable under Python 3).
        actions = list(action_distr.keys())
        probs = list(action_distr.values())
        sampled_distr = np.random.multinomial(1, probs).tolist()
        indices = [i for i, x in enumerate(sampled_distr) if x > 0]

        return actions[indices[0]]
Code Example #3
def get_transition_matrix(mdp):
    '''
    Args:
        mdp

    Returns:
        T (list): transition matrix
        state_to_id (dict)
        id_to_state (dict)
    '''
    vi = ValueIteration(mdp)  # Use VI class to enumerate states
    vi.run_vi()
    vi._compute_matrix_from_trans_func()
    # q = vi.get_q_function()
    trans_matrix = vi.trans_dict

    state_to_id = {}
    id_to_state = {}
    for i, u in enumerate(trans_matrix):
        state_to_id[u] = i
        id_to_state[i] = u

    T = np.zeros((len(trans_matrix), len(trans_matrix)), dtype=np.int8)
    for i, u in enumerate(trans_matrix):
        for j, a in enumerate(trans_matrix[u]):
            for k, v in enumerate(trans_matrix[u][a]):
                if trans_matrix[u][a][v] > 0:
                    T[i][state_to_id[v]] = 1  # Node index starts from 1 (MiniZinc is a 1-indexed language)
    return T, state_to_id, id_to_state
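A minimal usage sketch for the helper above; the GridWorldMDP, its parameters, and the import paths are illustrative assumptions that do not appear in the original snippet:

import numpy as np
from simple_rl.tasks import GridWorldMDP

# Enumerate a small grid world and build its reachability matrix.
mdp = GridWorldMDP(width=3, height=3, goal_locs=[(3, 3)])
T, state_to_id, id_to_state = get_transition_matrix(mdp)

# T[i][j] == 1 iff some action moves state i to state j with nonzero probability.
print(T.shape)
print(id_to_state[0], "->", [id_to_state[j] for j in np.flatnonzero(T[0])])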
Code Example #4
File: viz_example.py Project: zkytony/simple_rl
def main():
    
    # Setup MDP, Agents.
    mdp = GridWorldMDP(width=4, height=3, init_loc=(1, 1), goal_locs=[(4, 3)], lava_locs=[(4, 2)], gamma=0.95, walls=[(2, 2)], slip_prob=0.1)
    ql_agent = QLearningAgent(mdp.get_actions(), epsilon=0.2, alpha=0.2) 
    viz = parse_args()

    # Choose viz type.
    viz = "value"

    if viz == "value":
        # --> Color corresponds to higher value.
        # Run experiment and make plot.
        mdp.visualize_value()
    elif viz == "policy":
        # Viz policy
        value_iter = ValueIteration(mdp)
        value_iter.run_vi()
        policy = value_iter.policy
        mdp.visualize_policy(policy)
    elif viz == "agent":
        # --> Press <spacebar> to advance the agent.
        # First let the agent solve the problem and then visualize the agent's resulting policy.
        print("\n", str(ql_agent), "interacting with", str(mdp))
        run_single_agent_on_mdp(ql_agent, mdp, episodes=500, steps=200)
        mdp.visualize_agent(ql_agent)
    elif viz == "learning":
        # --> Press <r> to reset.
        # Show agent's interaction with the environment.
        mdp.visualize_learning(ql_agent, delay=0.005, num_ep=500, num_steps=200)
    elif viz == "interactive":
        # Press <1>, <2>, <3>, and so on to execute action 1, action 2, etc.
        mdp.visualize_interaction()
Code Example #5
    def update_policy(self):
        avg_mdp_vi = ValueIteration(compute_avg_mdp(self.active_mdp_distr),
                                    delta=0.0001,
                                    max_iterations=1000,
                                    sample_rate=5)
        avg_mdp_vi.run_vi()
        self.policy = avg_mdp_vi.policy
Code Example #6
def main():
    ap_map = {'a': (2, 2), 'b': (6, 3), 'c': (5, 3), 'd': (4, 2)}
    ltlformula = 'F (b & Fa)'
    # Setup MDP, Agents.
    mdp = LTLGridWorldMDP(ltltask=ltlformula,
                          ap_map=ap_map,
                          width=6,
                          height=6,
                          goal_locs=[(6, 6)],
                          slip_prob=0.2)
    mdp.automata.subproblem_flag = 0
    mdp.automata.subproblem_stay = 1
    mdp.automata.subproblem_goal = 0
    value_iter = ValueIteration(mdp, sample_rate=5)
    value_iter.run_vi()

    # Value Iteration.
    action_seq, state_seq = value_iter.plan(mdp.get_init_state())

    print("Plan for", mdp)
    for i in range(len(action_seq)):
        print("\t", action_seq[i], state_seq[i])

    print(ltlformula)
    f = open('/Users/romapatel/Desktop/actions.tsv', 'w+')
    for item in state_seq:
        f.write(str(item) + '\n')

    f.close()
    model = None
    ltl_visualiser(model)
Code Example #7
def main():

    # Setup MDP, Agents.
    mdp = FourRoomMDP(11, 11, goal_locs=[(11, 11)], gamma=0.9, step_cost=0.0)
    ql_agent = QLearningAgent(mdp.get_actions(), epsilon=0.2, alpha=0.4)
    viz = parse_args()

    # Choose viz type.
    viz = "learning"

    if viz == "value":
        # Run experiment and make plot.
        mdp.visualize_value()
    elif viz == "policy":
        # Viz policy
        value_iter = ValueIteration(mdp)
        value_iter.run_vi()
        policy = value_iter.policy
        mdp.visualize_policy(policy)
    elif viz == "agent":
        # Solve problem and show agent interaction.
        print("\n", str(ql_agent), "interacting with", str(mdp))
        run_single_agent_on_mdp(ql_agent, mdp, episodes=500, steps=200)
        mdp.visualize_agent(ql_agent)
    elif viz == "learning":
        # Run experiment and make plot.
        mdp.visualize_learning(ql_agent)
    elif viz == "interactive":
        mdp.visualize_interaction()
Code Example #8
File: mr_dict.py Project: ericrosenbrown/MR_AMDP
def get_l1_policy(start_room=None,
                  goal_room=None,
                  mdp=None,
                  starting_items=None,
                  goal_items=None,
                  actions=None,
                  doors=None,
                  rooms=None):
    if mdp is None:
        mdp = FourRoomL1MDP(start_room,
                            goal_room,
                            starting_items=starting_items,
                            goal_items=goal_items,
                            actions=actions,
                            doors=doors,
                            rooms=rooms)
    vi = ValueIteration(mdp)
    vi.run_vi()

    policy = defaultdict()
    action_seq, state_seq = vi.plan(mdp.init_state)

    print 'Plan for {}:'.format(mdp)
    for i in range(len(action_seq)):
        print "\tpi[{}] -> {}".format(state_seq[i], action_seq[i])
        policy[state_seq[i]] = action_seq[i]
    return policy
Code Example #9
def get_optimal_policies(environment):
    '''
    Args:
        environment (simple_rl.MDPDistribution)

    Returns:
        (list)
    '''

    # Make State Abstraction
    approx_qds_test = get_sa(environment,
                             indic_func=ind_funcs._q_eps_approx_indicator,
                             epsilon=0.05)

    # True Optimal
    true_opt_vi = ValueIteration(environment)
    true_opt_vi.run_vi()
    opt_agent = FixedPolicyAgent(true_opt_vi.policy, "$\pi^*$")

    # Optimal Abstraction
    opt_det_vi = AbstractValueIteration(environment,
                                        state_abstr=approx_qds_test,
                                        sample_rate=30)
    opt_det_vi.run_vi()
    opt_det_agent = FixedPolicyAgent(opt_det_vi.policy, name="$\pi_{\phi}^*$")

    stoch_policy_obj = StochasticSAPolicy(approx_qds_test, environment)
    stoch_agent = FixedPolicyAgent(stoch_policy_obj.policy,
                                   "$\pi(a \mid s_\phi )$")

    ql_agents = [opt_agent, stoch_agent, opt_det_agent]

    return ql_agents
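A sketch of how the returned fixed-policy agents might then be compared, assuming simple_rl's lifelong-learning experiment helper (the helper name and its keyword arguments are assumptions; the original snippet does not show how its result is used):

# Hypothetical driver code; run_agents_lifelong is assumed to come from simple_rl.run_experiments.
from simple_rl.run_experiments import run_agents_lifelong

agents = get_optimal_policies(environment)
run_agents_lifelong(agents, environment, samples=10, episodes=1, steps=100)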
Code Example #10
def main():
    ap_map = {'a': (2, 2), 'b': (6, 3), 'c': (5, 3), 'd': (4, 2)}
    print('Atomic propositions, ', ap_map)
    ltlformula = 'F (b & Fa)'
    print('LTL Formula, ', ltlformula)
    # Setup MDP, Agents.
    print('translatinggg')
    a = spot.translate('(a U b) & GFc & GFd', 'BA', 'complete')
    a.show("v" "")

    return  # NOTE: everything below this early return is never executed.
    mdp = LTLGridWorldMDP(ltltask=ltlformula,
                          ap_map=ap_map,
                          width=6,
                          height=6,
                          goal_locs=[(6, 6)],
                          slip_prob=0.2)

    mdp.automata.subproblem_flag = 0
    mdp.automata.subproblem_stay = 1
    mdp.automata.subproblem_goal = 0
    value_iter = ValueIteration(mdp, sample_rate=5)
    value_iter.run_vi()

    # Value Iteration.
    print('Value iteration')
    action_seq, state_seq = value_iter.plan(mdp.get_init_state())

    print("Plan for", mdp)
    for i in range(len(action_seq)):
        print("\t", action_seq[i], state_seq[i])
Code Example #11
def main():
    # Setup MDP, Agents.
    mdp = GridWorldMDP(width=6, height=6, goal_locs=[(6, 6)], slip_prob=0.2)
    value_iter = ValueIteration(mdp, sample_rate=5)
    value_iter.run_vi()

    # Value Iteration.
    action_seq, state_seq = value_iter.plan(mdp.get_init_state())

    print("Plan for", mdp)
    for i in range(len(action_seq)):
        print("\t", action_seq[i], state_seq[i])
Code Example #12
    def get_l1_policy(domain):
        vi = ValueIteration(domain, sample_rate=1)
        vi.run_vi()

        policy = defaultdict()
        action_seq, state_seq = vi.plan(domain.init_state)

        print('Plan for {}:'.format(domain))
        for i in range(len(action_seq)):
            print("\tpi[{}] -> {}\n".format(state_seq[i], action_seq[i]))
            policy[state_seq[i]] = action_seq[i]

        return policy
Code Example #13
    def get_l1_policy(start_room=None, goal_room=None, mdp=None):
        if mdp is None:
            mdp = CubeL1MDP(start_room, goal_room)
        vi = ValueIteration(mdp)
        vi.run_vi()

        policy = defaultdict()
        action_seq, state_seq = vi.plan(mdp.init_state)

        print('Plan for {}:'.format(mdp))
        for i in range(len(action_seq)):
            print("\tpi[{}] -> {}".format(state_seq[i], action_seq[i]))
            policy[state_seq[i]] = action_seq[i]
        return policy
Code Example #14
    def get_l1_policy(oomdp=None):
        if oomdp is None:
            oomdp = TaxiL1OOMDP()
        vi = ValueIteration(oomdp, sample_rate=1)
        vi.run_vi()

        policy = defaultdict()
        action_seq, state_seq = vi.plan(oomdp.init_state)

        print('Plan for {}:'.format(oomdp))
        for i in range(len(action_seq)):
            print("\tpi[{}] -> {}\n".format(state_seq[i], action_seq[i]))
            policy[state_seq[i]] = action_seq[i]

        return policy
Code Example #15
def main():
    # Setup MDP, Agents.
    size = 5
    agent = {
        "x": 1,
        "y": 1,
        "dx": 1,
        "dy": 0,
        "dest_x": size,
        "dest_y": size,
        "has_block": 0
    }
    blocks = [{"x": size, "y": 1}]
    lavas = [{
        "x": x,
        "y": y
    } for x, y in map(lambda z: (z + 1, (size + 1) / 2), xrange(size))]

    mdp = TrenchOOMDP(size, size, agent, blocks, lavas)
    ql_agent = QLearnerAgent(actions=mdp.get_actions())
    rand_agent = RandomAgent(actions=mdp.get_actions())

    # Run experiment and make plot.
    # run_agents_on_mdp([ql_agent, rand_agent], mdp, instances=30, episodes=250, steps=250)

    vi = ValueIteration(mdp, delta=0.0001, max_iterations=5000)
    iters, val = vi.run_vi()
    print " done."
    states = vi.get_states()
    num_states = len(states)
    print num_states, states
Code Example #16
def main():
    ap_map = {'a': (2,2),'b': (6,3), 'c': (5,3), 'd': (4,2)}
    ltlformula = 'F (b & Fa)'
    # Setup MDP, Agents.
    mdp = LTLGridWorldMDP(ltltask=ltlformula, ap_map=ap_map, width=6, height=6, goal_locs=[(6, 6)], slip_prob=0.2)
    mdp.automata.subproblem_flag = 0
    mdp.automata.subproblem_stay = 1
    mdp.automata.subproblem_goal = 0
    value_iter = ValueIteration(mdp, sample_rate=5)
    value_iter.run_vi()

    # Value Iteration.
    action_seq, state_seq = value_iter.plan(mdp.get_init_state())

    print("Plan for", mdp)
    for i in range(len(action_seq)):
        print("\t", action_seq[i], state_seq[i])
Code Example #17
def generate_agent(mdp_class, data_loc, mdp_parameters, visualize=False):
    try:
        with open('models/' + data_loc + '/vi_agent.pickle', 'rb') as f:
            mdp_agent, vi_agent = pickle.load(f)
    except:
        mdp_agent = make_mdp.make_custom_mdp(mdp_class, mdp_parameters)
        vi_agent = ValueIteration(mdp_agent, sample_rate=1)
        vi_agent.run_vi()

        with open('models/' + data_loc + '/vi_agent.pickle', 'wb') as f:
            pickle.dump((mdp_agent, vi_agent), f)

    # Visualize agent
    if visualize:
        fixed_agent = FixedPolicyAgent(vi_agent.policy)
        mdp_agent.visualize_agent(fixed_agent)
        mdp_agent.reset()  # reset the current state to the initial state
        mdp_agent.visualize_interaction()
Code Example #18
File: ROSMDPClass.py Project: ericrosenbrown/MR_AMDP
def get_l1_policy(start_room=None, goal_room=None, mdp=None):
    if mdp is None:
        mdp = FourRoomL1MDP(start_room,
                            goal_room,
                            starting_items=[2, 0],
                            goal_items=[2, 1])  # room 2: light off = 0, light on = 1
    vi = ValueIteration(mdp)
    vi.run_vi()

    policy = defaultdict()
    action_seq, state_seq = vi.plan(mdp.init_state)

    print 'Plan for {}:'.format(mdp)
    for i in range(len(action_seq)):
        print "\tpi[{}] -> {}".format(state_seq[i], action_seq[i])
        policy[state_seq[i]] = action_seq[i]
    return policy
Code Example #19
def main():

    args = parse_args()
    mdp = generate_MDP(args.width, args.height, args.i_loc, args.g_loc,
                       args.l_loc, args.gamma, args.Walls, args.slip)

    ql_agent = QLearningAgent(mdp.get_actions(),
                              epsilon=args.epsilon,
                              alpha=args.alpha,
                              explore=args.explore,
                              anneal=args.anneal)
    viz = args.mode

    if viz == "value":
        # --> Color corresponds to higher value.
        # Run experiment and make plot.
        mdp.visualize_value()
    elif viz == "policy":
        # Viz policy
        value_iter = ValueIteration(mdp)
        value_iter.run_vi()
        mdp.visualize_policy_values(
            (lambda state: value_iter.policy(state)),
            (lambda state: value_iter.value_func[state]))
    elif viz == "agent":
        # --> Press <spacebar> to advance the agent.
        # First let the agent solve the problem and then visualize the agent's resulting policy.
        print("\n", str(ql_agent), "interacting with", str(mdp))
        rand_agent = RandomAgent(actions=mdp.get_actions())
        run_agents_on_mdp([rand_agent, ql_agent],
                          mdp,
                          open_plot=True,
                          episodes=60,
                          steps=200,
                          instances=5,
                          success_reward=1)
        # mdp.visualize_agent(ql_agent)
    elif viz == "learning":
        # --> Press <r> to reset.
        # Show agent's interaction with the environment.
        mdp.visualize_learning(ql_agent,
                               delay=0.005,
                               num_ep=500,
                               num_steps=200)
Code Example #20
def main():
    mdp1 = GridWorldMDP(width=2,
                        height=1,
                        init_loc=(1, 1),
                        goal_locs=[(2, 1)],
                        slip_prob=0.5,
                        gamma=0.5)

    vi = ValueIteration(mdp1)
    iters, value = vi.run_vi()
    print("value=", value)
Code Example #21
def run_plain_pMDP(init_loc, ltl_formula, cube_env, ap_maps, verbose=False):
    start_time = time.time()
    mdp = RoomCubePlainMDP(init_loc=init_loc,
                           ltl_formula=ltl_formula,
                           env_file=[cube_env],
                           ap_maps=ap_maps)

    value_iter = ValueIteration(mdp, sample_rate=1, max_iterations=50)
    value_iter.run_vi()

    # Value Iteration
    action_seq, state_seq = value_iter.plan(mdp.get_init_state())

    computing_time = time.time() - start_time

    # Print
    if verbose:
        print("=====================================================")
        print("Plain: Plan for ", ltl_formula)
        for i in range(len(action_seq)):
            room_number, floor_number = mdp._get_abstract_number(state_seq[i])

            print("\t {} in room {} on the floor {}, {}".format(
                state_seq[i], room_number, floor_number, action_seq[i]))
        room_number, floor_number = mdp._get_abstract_number(state_seq[-1])
        print("\t {} in room {} on the floor {}".format(
            state_seq[-1], room_number, floor_number))

    # success?
    if len(state_seq) <= 1:
        flag_success = -1
    else:
        if mdp.automata.aut_spot.state_is_accepting(state_seq[-1].q):
            flag_success = 1
        else:
            flag_success = 0

    return (computing_time, len(action_seq), flag_success, state_seq,
            action_seq, value_iter.get_num_backups_in_recent_run())
Code Example #22
def main():
    # Setup MDP, Agents.
    mdp = FourRoomMDP(9, 9, goal_locs=[(9, 9)], gamma=0.95)
    ql_agent = QLearnerAgent(mdp.get_actions())

    viz = parse_args()

    if viz == "value":
        # Run experiment and make plot.
        mdp.visualize_value()
    elif viz == "policy":
        # Viz policy
        vi = ValueIteration(mdp)
        vi.run_vi()
        policy = vi.policy
        mdp.visualize_policy(policy)
    elif viz == "agent":
        # Solve problem and show agent interaction.
        print "\n", str(ql_agent), "interacting with", str(mdp)
        run_single_agent_on_mdp(ql_agent, mdp, episodes=500, steps=200)
        mdp.visualize_agent(ql_agent)
Code Example #23
def make_multitask_sa_info_sa(mdp_distr, beta, is_deterministic_ib=False):
    '''
    Args:
        mdp_distr (simple_rl.MDPDistribution)
        beta (float)
        is_deterministic_ib (bool)

    Returns:
        (simple_rl.StateAbstraction)
    '''

    master_sa = None
    all_state_absr = []
    for mdp in mdp_distr.get_all_mdps():

        # Get demo policy.
        vi = ValueIteration(mdp)
        vi.run_vi()
        demo_policy = get_lambda_policy(
            make_det_policy_eps_greedy(vi.policy,
                                       vi.get_states(),
                                       mdp.get_actions(),
                                       epsilon=0.2))

        # Get abstraction.
        pmf_s_phi, phi_pmf, abstr_policy_pmf = run_info_sa(
            mdp,
            demo_policy,
            beta=beta,
            is_deterministic_ib=is_deterministic_ib)
        crisp_sa = convert_prob_sa_to_sa(ProbStateAbstraction(phi_pmf))
        all_state_absr.append(crisp_sa)

    # Make master state abstr by intersection.
    vi = ValueIteration(mdp_distr.get_all_mdps()[0])
    ground_states = vi.get_states()

    master_sa = sa_helpers.merge_state_abstr(all_state_absr, ground_states)

    return master_sa
Code Example #24
def main(open_plot=True):

    # Setup MDP.

    args = parse_args()
    mdp = generate_MDP(args.width, args.height, args.i_loc, args.g_loc,
                       args.l_loc, args.gamma, args.Walls, args.slip)

    if args.visualize:
        value_iter = ValueIteration(mdp)
        value_iter.run_vi()
        mdp.visualize_policy_values(
            (lambda state: value_iter.policy(state)),
            (lambda state: value_iter.value_func[state]))

    else:
        custom_q = parse_custom_q_table(args.custom_q, args.default_q)

        agents = []
        for agent in args.agents:
            if agent == 'q_learning':
                agents.append(QLearningAgent(actions=mdp.get_actions()))
            elif agent == 'potential_q':
                agents.append(
                    QLearningAgent(actions=mdp.get_actions(),
                                   custom_q_init=custom_q,
                                   name="Potential_Q"))
            elif agent == 'random':
                agents.append(RandomAgent(actions=mdp.get_actions()))
            elif agent == 'rmax':
                agents.append(RMaxAgent(mdp.get_actions()))

        # Run experiment and make plot.
        run_agents_on_mdp(agents,
                          mdp,
                          instances=1,
                          episodes=100,
                          steps=100,
                          open_plot=open_plot,
                          verbose=True)
Code Example #25
File: brtdp_example.py Project: david-abel/simple_rl
class MonotoneLowerBound(Planner):
    def __init__(self, mdp, name='MonotoneLowerBound'):
        relaxed_mdp = MonotoneLowerBound._construct_deterministic_relaxation_mdp(mdp)

        Planner.__init__(self, relaxed_mdp, name)
        self.vi = ValueIteration(relaxed_mdp)
        self.states = self.vi.get_states()
        self.vi._compute_matrix_from_trans_func()
        self.vi.run_vi()
        self.lower_values = self._construct_lower_values()

    @staticmethod
    def _construct_deterministic_relaxation_mdp(mdp):
        relaxed_mdp = copy.deepcopy(mdp)
        relaxed_mdp.set_slip_prob(0.0)
        return relaxed_mdp

    def _construct_lower_values(self):
        values = defaultdict()
        for state in self.states:
            values[state] = self.vi.get_value(state)
        return values
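A usage sketch for MonotoneLowerBound, assuming it is applied to a simple_rl GridWorldMDP (which provides the set_slip_prob used by the deterministic relaxation); the MDP parameters are illustrative only:

from simple_rl.tasks import GridWorldMDP

# Plan in the deterministic relaxation of a slippery grid world and read off its state values.
mdp = GridWorldMDP(width=4, height=3, goal_locs=[(4, 3)], slip_prob=0.2)
lower_bound = MonotoneLowerBound(mdp)
for state, value in lower_bound.lower_values.items():
    print(state, value)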
Code Example #26
def main():
    # Grab experiment params.
    # Switch between Upworld and Trench
    mdp_class = "upworld"
    # mdp_class = "trench"
    grid_lim = 20 if mdp_class == 'upworld' else 7
    gamma = 0.95
    vanilla_file = "vi.csv"
    sa_file = "vi-$\phi_{Q_d^*}.csv"
    file_prefix = "results/planning-" + mdp_class + "/"
    clear_files(dir_name=file_prefix)

    for grid_dim in xrange(3, grid_lim):
        # ======================
        # == Make Environment ==
        # ======================
        environment = make_mdp.make_mdp(mdp_class=mdp_class, grid_dim=grid_dim)
        environment.set_gamma(gamma)

        # =======================
        # == Make Abstractions ==
        # =======================
        sa_qds = get_sa(environment,
                        indic_func=ind_funcs._q_disc_approx_indicator,
                        epsilon=0.01)

        # ============
        # == Run VI ==
        # ============
        vanilla_vi = ValueIteration(environment, delta=0.0001, sample_rate=15)
        sa_vi = AbstractValueIteration(ground_mdp=environment,
                                       state_abstr=sa_qds)

        print "Running VIs."
        start_time = time.clock()
        vanilla_iters, vanilla_val = vanilla_vi.run_vi()
        vanilla_time = round(time.clock() - start_time, 2)

        start_time = time.clock()
        sa_iters, sa_val = sa_vi.run_vi()
        sa_time = round(time.clock() - start_time, 2)

        print "vanilla", vanilla_iters, vanilla_val, vanilla_time
        print "sa:", sa_iters, sa_val, sa_time

        write_datum(file_prefix + "iters/" + vanilla_file, vanilla_iters)
        write_datum(file_prefix + "iters/" + sa_file, sa_iters)

        write_datum(file_prefix + "times/" + vanilla_file, vanilla_time)
        write_datum(file_prefix + "times/" + sa_file, sa_time)
Code Example #27
def evaluate_multitask_sa(multitask_sa, mdp_distr, samples=10):
    '''
    Args:
        multitask_sa (simple_rl.abstraction.StateAbstraction)
        mdp_distr (simple_rl.mdp.MDPDistribution)
        samples (int)
    '''

    # Average value over @samples.
    avg_opt_val = 0.0
    avg_abstr_opt_val = 0.0
    for i in range(samples):
        mdp = mdp_distr.sample()

        # Optimal Policy.
        vi = ValueIteration(mdp)
        vi.run_vi()
        opt_agent = FixedPolicyAgent(vi.policy)

        # Evaluate Optimal Abstract Policy.
        # abstr_mdp = make_abstr_mdp(mdp, state_abstr=multitask_sa)
        # abstr_vi = ValueIteration(abstr_mdp, sample_rate=20)
        abstr_vi = AbstractValueIteration(mdp, state_abstr=multitask_sa)
        abstr_vi.run_vi()
        abstr_opt_policy_mapper = SAVI(multitask_sa, abstr_vi.policy)
        abstr_opt_agent = FixedPolicyAgent(abstr_opt_policy_mapper.policy,
                                           "abstract")

        # Compare.
        avg_opt_val += evaluate_agent(opt_agent, mdp) / samples
        avg_abstr_opt_val += evaluate_agent(abstr_opt_agent, mdp) / samples

    print "Ground:", multitask_sa.get_num_ground_states(), round(
        avg_opt_val, 4)
    print "Abstract:", multitask_sa.get_num_abstr_states(), round(
        avg_abstr_opt_val, 4)
    print
Code Example #28
def _make_mini_mdp_option_policy(mini_mdp):
    '''
    Args:
        mini_mdp (MDP)

    Returns:
        Policy
    '''
    # Solve the MDP defined by the terminal abstract state.
    mini_mdp_vi = ValueIteration(mini_mdp, delta=0.005, max_iterations=500, sample_rate=20)
    iters, val = mini_mdp_vi.run_vi()

    o_policy_dict = make_dict_from_lambda(mini_mdp_vi.policy, mini_mdp_vi.get_states())
    o_policy = PolicyFromDict(o_policy_dict)

    return o_policy.get_action, mini_mdp_vi
Code Example #29
File: info_sa.py Project: apragupta/IB_SA_simple_rl
def main():

    # Make MDP.
    grid_dim = 11
    mdp = FourRoomMDP(width=grid_dim, height=grid_dim, init_loc=(1, 1), slip_prob=0.05, goal_locs=[(grid_dim, grid_dim)], gamma=0.99)

    # Experiment Type.
    exp_type = "learn_w_abstr"

    # For comparing policies and visualizing.
    beta = 1
    is_deterministic_ib = True
    is_agent_in_control = True

    # For main plotting experiment.
    beta_range = list(chart_utils.drange(0.0, 4.0, 1.0))
    instances = 1

    # Get demo policy.
    vi = ValueIteration(mdp)
    _, val = vi.run_vi()

    # Epsilon greedy policy
    demo_policy = get_lambda_policy(make_det_policy_eps_greedy(vi.policy, vi.get_states(), mdp.get_actions(), epsilon=0.1))

    if exp_type == "plot_info_sa_val_and_num_states":
        # Makes the main two plots.
        make_info_sa_val_and_size_plots(mdp, demo_policy, beta_range, instances=instances, is_agent_in_control=is_agent_in_control)
    elif exp_type == "compare_policies":
        # Makes a plot comparing value of pi-phi combo from info_sa with \pi_d.
        info_sa_compare_policies(mdp, demo_policy, beta=beta, is_deterministic_ib=is_deterministic_ib, is_agent_in_control=is_agent_in_control)
    elif exp_type == "visualize_info_sa_abstr":
        # Visualize the state abstraction found by info_sa.
        info_sa_visualize_abstr(mdp, demo_policy, beta=beta, is_deterministic_ib=is_deterministic_ib, is_agent_in_control=is_agent_in_control)
    elif exp_type == "learn_w_abstr":
        # Run learning experiments for different settings of \beta.
        learn_w_abstr(mdp, demo_policy, is_deterministic_ib=is_deterministic_ib)
    elif exp_type == "planning":
        info_sa_planning_experiment()
Code Example #30
File: info_sa.py Project: apragupta/IB_SA_simple_rl
def info_sa_planning_experiment(min_grid_size=5, max_grid_size=11, beta=10.0):
    '''
    Args:
        min_grid_size (int)
        max_grid_size (int)
        beta (float): Hyperparameter for InfoSA.

    Summary:
        Writes num iterations and time (seconds) for planning with and without abstractions.
    '''
    vanilla_file = "vi.csv"
    sa_file = "vi-$\\phi$.csv"
    file_prefix = os.path.join("results", "planning-four_room")
    
    clear_files(dir_name=file_prefix)

    for grid_dim in xrange(min_grid_size, max_grid_size + 1):
        # ======================
        # == Make Environment ==
        # ======================
        mdp = FourRoomMDP(width=grid_dim, height=grid_dim, init_loc=(1, 1), goal_locs=[(grid_dim, grid_dim)], gamma=0.9)
        
        # Get demo policy.
        vi = ValueIteration(mdp)
        vi.run_vi()
        demo_policy = get_lambda_policy(make_det_policy_eps_greedy(vi.policy, vi.get_states(), mdp.get_actions(), epsilon=0.2))

        # =======================
        # == Make Abstractions ==
        # =======================
        pmf_s_phi, phi_pmf, abstr_policy = run_info_sa(mdp, demo_policy, iters=500, beta=beta, convergence_threshold=0.00001)
        lambda_abstr_policy = get_lambda_policy(abstr_policy)
        prob_s_phi = ProbStateAbstraction(phi_pmf)
        crisp_s_phi = convert_prob_sa_to_sa(prob_s_phi)

        # ============
        # == Run VI ==
        # ============
        vanilla_vi = ValueIteration(mdp, delta=0.0001, sample_rate=25)
        sa_vi = AbstractValueIteration(ground_mdp=mdp, state_abstr=crisp_s_phi, delta=0.0001, vi_sample_rate=25, amdp_sample_rate=25)

        # ==========
        # == Plan ==
        # ==========
        print "Running VIs."
        start_time = time.clock()
        vanilla_iters, vanilla_val = vanilla_vi.run_vi()
        vanilla_time = round(time.clock() - start_time, 2)

        mdp.reset()
        start_time = time.clock()
        sa_iters, sa_abs_val = sa_vi.run_vi()
        sa_time = round(time.clock() - start_time, 2)
        sa_val = evaluate_agent(FixedPolicyAgent(sa_vi.policy), mdp, instances=25)

        print "\n" + "*"*20
        print "Vanilla", "\n\t Iters:", vanilla_iters, "\n\t Value:", round(vanilla_val, 4), "\n\t Time:", vanilla_time
        print 
        print "Phi:", "\n\t Iters:", sa_iters, "\n\t Value:", round(sa_val, 4), "\n\t Time:", sa_time
        print "*"*20 + "\n\n"

        write_datum(os.path.join(file_prefix, "iters", vanilla_file), vanilla_iters)
        write_datum(os.path.join(file_prefix, "iters", sa_file), sa_iters)

        write_datum(os.path.join(file_prefix, "times", vanilla_file), vanilla_time)
        write_datum(os.path.join(file_prefix, "times", sa_file), sa_time)
Code Example #31
def draw_state(screen,
               cleanup_mdp,
               state,
               policy=None,
               action_char_dict={},
               show_value=False,
               agent=None,
               draw_statics=False,
               agent_shape=None):
    '''
    Args:
        screen (pygame.Surface)
        grid_mdp (MDP)
        state (State)
        show_value (bool)
        agent (Agent): Used to show value, by default uses VI.
        draw_statics (bool)
        agent_shape (pygame.rect)

    Returns:
        (pygame.Shape)
    '''
    # Make value dict.
    val_text_dict = defaultdict(lambda: defaultdict(float))
    if show_value:
        if agent is not None:
            # Use agent value estimates.
            for s in agent.q_func.keys():
                val_text_dict[s.x][s.y] = agent.get_value(s)
        else:
            # Use Value Iteration to compute value.
            vi = ValueIteration(cleanup_mdp)
            vi.run_vi()
            for s in vi.get_states():
                val_text_dict[s.x][s.y] = vi.get_value(s)

    # Make policy dict.
    policy_dict = defaultdict(lambda: defaultdict(str))
    if policy:
        vi = ValueIteration(cleanup_mdp)
        vi.run_vi()
        for s in vi.get_states():
            policy_dict[s.x][s.y] = policy(s)

    # Prep some dimensions to make drawing easier.
    scr_width, scr_height = screen.get_width(), screen.get_height()
    width_buffer = scr_width / 10.0
    height_buffer = 30 + (scr_height / 10.0)  # Add 30 for title.

    width = cleanup_mdp.width
    height = cleanup_mdp.height

    cell_width = (scr_width - width_buffer * 2) / width
    cell_height = (scr_height - height_buffer * 2) / height
    # goal_locs = grid_mdp.get_goal_locs()
    # lava_locs = grid_mdp.get_lavacc_locs()
    font_size = int(min(cell_width, cell_height) / 4.0)
    reg_font = pygame.font.SysFont("CMU Serif", font_size)
    cc_font = pygame.font.SysFont("Courier", font_size * 2 + 2)

    # room_locs = [(x + 1, y + 1) for room in cleanup_mdp.rooms for (x, y) in room.points_in_room]
    door_locs = set([(door.x + 1, door.y + 1) for door in state.doors])

    # Draw the static entities.
    # print(draw_statics)
    # draw_statics = True
    # if draw_statics:
        # For each row:
    for i in range(width):
        # For each column:
        for j in range(height):

            top_left_point = width_buffer + cell_width * i, height_buffer + cell_height * j
            r = pygame.draw.rect(screen, (46, 49, 49), top_left_point + (cell_width, cell_height), 3)

            # if policy and not grid_mdp.is_wall(i+1, height - j):
            if policy and (i + 1, height - j) in cleanup_mdp.legal_states:
                a = policy_dict[i + 1][height - j]
                if a not in action_char_dict:
                    text_a = a
                else:
                    text_a = action_char_dict[a]
                text_center_point = int(top_left_point[0] + cell_width / 2.0 - 10), int(
                    top_left_point[1] + cell_height / 3.0)
                text_rendered_a = cc_font.render(text_a, True, (46, 49, 49))
                screen.blit(text_rendered_a, text_center_point)

            # if show_value and not grid_mdp.is_wall(i+1, grid_mdp.height - j):
            if show_value and (i + 1, height - j) in cleanup_mdp.legal_states:
                # Draw the value.
                val = val_text_dict[i + 1][height - j]
                color = mdpv.val_to_color(val)
                pygame.draw.rect(screen, color, top_left_point + (cell_width, cell_height), 0)
                # text_center_point = int(top_left_point[0] + cell_width/2.0 - 10), int(top_left_point[1] + cell_height/7.0)
                # text = str(round(val,2))
                # text_rendered = reg_font.render(text, True, (46, 49, 49))
                # screen.blit(text_rendered, text_center_point)

            # if grid_mdp.is_wall(i+1, grid_mdp.height - j):
            if (i + 1, height - j) not in cleanup_mdp.legal_states:
                # Draw the walls.
                top_left_point = width_buffer + cell_width * i + 5, height_buffer + cell_height * j + 5
                pygame.draw.rect(screen, (94, 99, 99), top_left_point + (cell_width - 10, cell_height - 10), 0)

            if (i + 1, height - j) in door_locs:
                # Draw door
                # door_color = (66, 83, 244)
                door_color = (0, 0, 0)
                top_left_point = width_buffer + cell_width * i + 5, height_buffer + cell_height * j + 5
                pygame.draw.rect(screen, door_color, top_left_point + (cell_width - 10, cell_height - 10), 0)

            else:
                room = cleanup_mdp.check_in_room(state.rooms, i + 1 - 1, height - j - 1)  # Minus 1 for inconsistent x, y
                if room:
                    top_left_point = width_buffer + cell_width * i + 5, height_buffer + cell_height * j + 5
                    room_rgb = _get_rgb(room.color)
                    pygame.draw.rect(screen, room_rgb, top_left_point + (cell_width - 10, cell_height - 10), 0)

            block = cleanup_mdp.find_block(state.blocks, i + 1 - 1, height - j - 1)
            # print(state)
            # print(block)
            if block:
                circle_center = int(top_left_point[0] + cell_width / 2.0), int(top_left_point[1] + cell_height / 2.0)
                block_rgb = _get_rgb(block.color)
                pygame.draw.circle(screen, block_rgb, circle_center, int(min(cell_width, cell_height) / 4.0))

            # Current state.
            if not show_value and (i + 1, height - j) == (state.x + 1, state.y + 1) and agent_shape is None:
                tri_center = int(top_left_point[0] + cell_width / 2.0), int(top_left_point[1] + cell_height / 2.0)
                agent_shape = _draw_agent(tri_center, screen, base_size=min(cell_width, cell_height) / 2.5 - 8)

    if agent_shape is not None:
        # Clear the old shape.
        pygame.draw.rect(screen, (255, 255, 255), agent_shape)
        top_left_point = width_buffer + cell_width * ((state.x + 1) - 1), height_buffer + cell_height * (
                height - (state.y + 1))
        tri_center = int(top_left_point[0] + cell_width / 2.0), int(top_left_point[1] + cell_height / 2.0)

        # Draw new.
        # if not show_value or policy is not None:
        agent_shape = _draw_agent(tri_center, screen, base_size=min(cell_width, cell_height) / 2.5 - 16)

    pygame.display.flip()

    return agent_shape