Example #1
0
 def test_draw_hypothesis(self):
     """After many identical observations, a drawn hypothesis should put
     nearly all transition mass on the observed successor and recover the
     observed reward (to 1 decimal place)."""
     tracker = Keeper()
     repetitions = 1000
     for _ in range(repetitions):
         tracker.update_reward_and_transition(self.s1, self.act_a,
                                              self.s2, 1.4)
     drawn = Hypothesis.draw_hypothesis(self.model, tracker)
     # every successor the model considers reachable keeps non-zero mass
     for successor in self.model.get_next_states(self.s1):
         self.assertGreater(
             drawn.get_transition(self.s1, self.act_a, successor), 0)
     tolerance_places = 1
     self.assertAlmostEqual(
         drawn.get_transition(self.s1, self.act_a, self.s2),
         1, tolerance_places)
     self.assertAlmostEqual(
         drawn.get_reward(self.s1, self.act_a), 1.4, tolerance_places)
 def test_draw_hypothesis(self):
     """Drawing a hypothesis from 1000 identical (s1, a, s2, 1.4) samples
     should concentrate transition probability on s2 while keeping every
     modeled successor strictly positive."""
     observed = Keeper()
     trials = 0
     while trials < 1000:
         observed.update_reward_and_transition(self.s1, self.act_a,
                                               self.s2, 1.4)
         trials += 1
     sample = Hypothesis.draw_hypothesis(self.model, observed)
     successors = self.model.get_next_states(self.s1)
     for nxt in successors:
         transition = sample.get_transition(self.s1, self.act_a, nxt)
         self.assertGreater(transition, 0)
     decimal_places = 1
     self.assertAlmostEqual(
         sample.get_transition(self.s1, self.act_a, self.s2),
         1, decimal_places)
     self.assertAlmostEqual(
         sample.get_reward(self.s1, self.act_a), 1.4, decimal_places)
Example #3
0
class RLAlgorithm(object):
    """Model-based RL helper that delegates statistics bookkeeping to a
    Keeper and state-space queries to a Model.

    Fixes: normalized the inconsistent 7-space indentation on the bodies
    of get_transition and update_reward to the file's 8-space standard,
    and added documentation throughout.
    """

    def __init__(self):
        # Keeper accumulates observed transition/reward statistics;
        # Model describes the state space (reachable successors).
        self.keepr = Keeper()
        self.model = Model()

    def get_transition(self, s1, a, s2):
        """Return the estimated transition probability P(s1, a, s2)."""
        return self.keepr.get_transition(s1, a, s2)

    def get_reward(self, s1, a, s2):
        """Return the estimated reward for taking a in s1 and reaching s2."""
        return self.keepr.get_reward(s1, a, s2)

    def get_transition_table(self, state, action):
        """Return transition estimates from `state` under `action` over
        all successors the model considers reachable."""
        return self.keepr.get_transition_table(
            state, action, self.model.get_next_states(state))

    def get_reward_table(self, state, action):
        """Return reward estimates from `state` under `action`.

        NOTE(review): `state` is passed twice here; this matches the other
        copy of this class in the file, but confirm against Keeper's
        get_reward_table signature that the duplicate argument is intended.
        """
        return self.keepr.get_reward_table(state, state, action,
                                           self.model.get_next_states(state))

    def update_transition(self, s1, a, s2):
        """Record one observed transition (s1, a) -> s2, updating counts."""
        self.keepr.update_transition(s1, a, s2)

    def update_reward(self, s1, a, s2, r):
        """Record reward r observed for transition (s1, a) -> s2."""
        self.keepr.update_reward(s1, a, s2, r)
Example #4
0
class RLAlgorithm(object):
    """Thin facade over a Keeper (observed transition/reward statistics)
    and a Model (state-space structure)."""

    def __init__(self):
        """Start with an empty statistics keeper and a fresh model."""
        self.keepr = Keeper()
        self.model = Model()

    def get_transition(self, s1, a, s2):
        """Return the estimated transition probability P(s1, a, s2)."""
        probability = self.keepr.get_transition(s1, a, s2)
        return probability

    def get_reward(self, s1, a, s2):
        """Return the estimated reward for (s1, a, s2)."""
        reward = self.keepr.get_reward(s1, a, s2)
        return reward

    def get_transition_table(self, state, action):
        """Return transition estimates over the model's successors of state."""
        successors = self.model.get_next_states(state)
        return self.keepr.get_transition_table(state, action, successors)

    def get_reward_table(self, state, action):
        """Return reward estimates over the model's successors of state."""
        successors = self.model.get_next_states(state)
        return self.keepr.get_reward_table(state, state, action, successors)

    def update_transition(self, s1, a, s2):
        """Record one observed transition, keeping counts in the keeper."""
        self.keepr.update_transition(s1, a, s2)

    def update_reward(self, s1, a, s2, r):
        """Record one observed reward sample in the keeper."""
        self.keepr.update_reward(s1, a, s2, r)
Example #5
0
 def __init__(self):
     """Initialize the algorithm with its two collaborators: a Keeper
     for observed statistics and a Model for state-space queries."""
     self.keepr = Keeper()
     self.model = Model()
Example #6
0
 def __init__(self):
     """Set up empty state: a fresh statistics keeper and a fresh model."""
     # Both attributes start unpopulated; updates arrive via the
     # update_* methods as experience is gathered.
     self.keepr = Keeper()
     self.model = Model()