Example #1
class RLAlgorithm(object):
    def __init__(self):
        self.keepr = Keeper()
        self.model = Model()

    # compute transition function P(s1, a, s2)
    def get_transition(self, s1, a, s2):
        return self.keepr.get_transition(s1, a, s2)

    # look up the reward R(s1, a, s2)
    def get_reward(self, s1, a, s2):
        return self.keepr.get_reward(s1, a, s2)

    # transition table over the next states reachable from (state, action)
    def get_transition_table(self, state, action):
        return self.keepr.get_transition_table(
            state, action, self.model.get_next_states(state))

    # reward table over the next states reachable from (state, action)
    def get_reward_table(self, state, action):
        return self.keepr.get_reward_table(
            state, state, action, self.model.get_next_states(state))

    # update the transition model, keeping track of counts
    def update_transition(self, s1, a, s2):
        self.keepr.update_transition(s1, a, s2)

    # update the reward model, keeping track of observed rewards
    def update_reward(self, s1, a, s2, r):
        self.keepr.update_reward(s1, a, s2, r)
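
The snippet does not include the Keeper or Model classes it relies on. Below is a minimal, hypothetical sketch of a count-based Keeper, assuming it estimates P(s2 | s1, a) by maximum likelihood over observed visit counts and averages observed rewards; the attribute names (counts, totals, reward_sums) and the table helper are placeholders, not the original API, and the reward-table helper is omitted.

from collections import defaultdict

class Keeper(object):
    def __init__(self):
        self.counts = defaultdict(int)          # (s1, a, s2) -> visit count
        self.totals = defaultdict(int)          # (s1, a) -> total visits
        self.reward_sums = defaultdict(float)   # (s1, a, s2) -> summed reward

    # maximum-likelihood estimate of P(s2 | s1, a); 0 if (s1, a) never seen
    def get_transition(self, s1, a, s2):
        total = self.totals[(s1, a)]
        return self.counts[(s1, a, s2)] / float(total) if total else 0.0

    # average observed reward for (s1, a, s2); 0 if never seen
    def get_reward(self, s1, a, s2):
        n = self.counts[(s1, a, s2)]
        return self.reward_sums[(s1, a, s2)] / float(n) if n else 0.0

    # table of estimated P(s2 | state, action) over the supplied next states
    def get_transition_table(self, state, action, next_states):
        return {s2: self.get_transition(state, action, s2) for s2 in next_states}

    # record one observed transition
    def update_transition(self, s1, a, s2):
        self.counts[(s1, a, s2)] += 1
        self.totals[(s1, a)] += 1

    # record one observed reward (assumes update_transition is called for the same step)
    def update_reward(self, s1, a, s2, r):
        self.reward_sums[(s1, a, s2)] += r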
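
A usage sketch under the same assumptions, with the RLAlgorithm above and the Keeper sketch in scope, an illustrative Model stub, and made-up state/action names:

class Model(object):
    # placeholder successor enumeration; a real model would depend on the MDP
    def get_next_states(self, state):
        return ["s1", "s2"]

algo = RLAlgorithm()

# feed a few observed (s1, a, s2, r) samples into the model estimates
for s1, a, s2, r in [("s0", "right", "s1", 0.0),
                     ("s0", "right", "s1", 0.0),
                     ("s0", "right", "s2", 1.0)]:
    algo.update_transition(s1, a, s2)
    algo.update_reward(s1, a, s2, r)

print(algo.get_transition_table("s0", "right"))   # {'s1': 0.666..., 's2': 0.333...}
print(algo.get_reward("s0", "right", "s2"))       # 1.0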