def test_compile_4():
    """Compiling an MDP with no terminal state must raise ValueError."""
    mdp = MDP()
    transition = Transition("s0", "a0", "s1", 1, 0)
    mdp.add_transition(transition)

    # No terminal state registered yet -> compilation is rejected.
    with pytest.raises(ValueError):
        mdp.ensure_compiled()

    # Once a terminal state exists, compilation succeeds.
    mdp.add_terminal_state("s1")
    mdp.ensure_compiled()
def test_compile_5():
    """ensure_compiled freezes containers to tuples; _decompile reverts them to sets."""
    mdp = MDP()
    mdp.add_transition(Transition("s0", "a0", "s1", 1, 0))
    mdp.add_terminal_state("s1")

    mdp.ensure_compiled()
    assert mdp.compiled
    # After compilation all three containers are immutable tuples.
    for frozen in (mdp.states, mdp.actions, mdp.terminal_states):
        assert type(frozen) is tuple

    mdp._decompile()
    assert not mdp.compiled
    # Decompiling restores the mutable set representation.
    for thawed in (mdp.states, mdp.actions, mdp.terminal_states):
        assert type(thawed) is set
def test_set_terminal():
    """add_terminal_state registers terminals and creates unknown states on the fly."""
    mdp = MDP()
    mdp.add_transition(Transition("s0", "a0", "s1", 0, 0))

    mdp.add_terminal_state("s0")
    assert len(mdp.states) == 2
    assert len(mdp.actions) == 1
    assert len(mdp.terminal_states) == 1
    assert "s0" in mdp.terminal_states

    # Marking a never-seen state as terminal should also add it to the state set.
    mdp.add_terminal_state("s4")
    assert len(mdp.states) == 3
    # BUG FIX: the original line was `assert len(mdp.terminal_states)`, which only
    # checks truthiness (non-empty) — the `== 2` comparison was evidently dropped.
    # Two terminal states ("s0" and "s4") have been registered at this point.
    assert len(mdp.terminal_states) == 2
    assert "s4" in mdp.terminal_states
    assert "s4" in mdp.states
:param theta (float, optional): stop threshold, defaults to 1e-6 :return (Tuple[np.ndarray of float with dim (num of states, num of actions), np.ndarray of float with dim (num of states)]): Tuple of calculated policy and value function """ self.mdp.ensure_compiled() self.theta = theta return self._policy_improvement() if __name__ == "__main__": mdp = MDP() mdp.add_transition( # start action end prob reward Transition("high", "wait", "high", 1, 2), Transition("high", "search", "high", 0.8, 5), Transition("high", "search", "low", 0.2, 5), Transition("high", "recharge", "high", 1, 0), Transition("low", "recharge", "high", 1, 0), Transition("low", "wait", "low", 1, 2), Transition("low", "search", "high", 0.6, -3), Transition("low", "search", "low", 0.4, 5), ) solver = ValueIteration(mdp, 0.9) policy, valuefunc = solver.solve() print("---Value Iteration---") print("Policy:") print(solver.decode_policy(policy)) print("Value Function")