Example #1
    def backward_induction_vf_and_pi(
        self, ff: Sequence[Callable[[float], float]]
    ) -> Iterator[Tuple[DNNApprox[float], Policy[float, float]]]:

        init_fa: DNNApprox[float] = self.get_vf_func_approx(ff)

        mdp_f0_mu_triples: Sequence[
            Tuple[
                MarkovDecisionProcess[float, float],
                DNNApprox[float],
                SampledDistribution[float],
            ]
        ] = [
            (self.get_mdp(i), init_fa, self.get_states_distribution(i))
            for i in range(self.time_steps())
        ]

        num_state_samples: int = 300
        error_tolerance: float = 1e-8

        return back_opt_vf_and_policy(
            mdp_f0_mu_triples=mdp_f0_mu_triples,
            γ=1.0,
            num_state_samples=num_state_samples,
            error_tolerance=error_tolerance,
        )
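
back_opt_vf_and_policy performs approximate backward induction over the supplied (MDP, initial function approximation, state distribution) triples and returns an iterator yielding one (value function approximation, policy) pair per time step; consistent with the test in Example #3 below, the pairs come out in forward time order (t = 0 first). A minimal consumption sketch, where `model` (an instance of the class above) and the feature functions `ff` are hypothetical placeholders:

# Hedged usage sketch: `model` and `ff` are hypothetical placeholders for an
# instance of the class above and its feature functions.
vf_and_pi = list(model.backward_induction_vf_and_pi(ff))
for t, (vf, pi) in enumerate(vf_and_pi):
    # Assuming the function-approximation object can be called on a single
    # state value; here the state type is float (e.g. a wealth level of 1.0).
    print(f"t={t}: V*(1.0) ≈ {vf(1.0):.4f}")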
Example #2
    def backward_induction_vf_and_pi(
        self,
        features: Sequence[Callable[[StateType], float]],
        reg_coeff: float
    ) -> Iterator[Tuple[FunctionApprox[StateType], Policy[StateType, bool]]]:

        mdp_f0_mu_triples: Sequence[Tuple[
            MarkovDecisionProcess[StateType, bool],
            FunctionApprox[StateType],
            SampledDistribution[StateType]
        ]] = [
            (
                self.get_mdp(t=i),
                self.get_vf_func_approx(
                    t=i,
                    features=features,
                    reg_coeff=reg_coeff
                ),
                self.get_states_distribution(t=i)
            )
            for i in range(self.num_steps + 1)
        ]

        num_state_samples: int = 1000

        return back_opt_vf_and_policy(
            mdp_f0_mu_triples=mdp_f0_mu_triples,
            γ=np.exp(-self.rate * self.expiry / self.num_steps),
            num_state_samples=num_state_samples,
            error_tolerance=1e-8
        )
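
Here the per-step discount γ comes from continuously compounding the risk-free rate over one time step of length expiry / num_steps, i.e. γ = exp(-rate · expiry / num_steps). A quick numerical sketch with hypothetical parameter values:

import numpy as np

# Hypothetical parameters: 5% risk-free rate, 1-year expiry, 100 time steps.
rate, expiry, num_steps = 0.05, 1.0, 100
gamma = np.exp(-rate * expiry / num_steps)
print(gamma)  # ≈ 0.9995: the discount applied across each single step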
Example #3
    def test_value_iteration(self):
        vpstar = optimal_vf_and_policy(self.mdp_seq, 1.)
        # Materialize the states so they can be iterated more than once below
        states = list(self.single_step_mdp.states())
        fa_dynamic = Dynamic({s: 0.0 for s in states})
        fa_tabular = Tabular()
        distribution = Choose(set(states))
        approx_vpstar_finite = back_opt_vf_and_policy_finite(
            [(self.mdp_seq[i], fa_dynamic) for i in range(self.steps)],
            1.
        )
        approx_vpstar = back_opt_vf_and_policy(
            [(self.single_step_mdp, fa_tabular, distribution)
             for _ in range(self.steps)],
            1.,
            num_state_samples=120,
            error_tolerance=0.01
        )

        for t, ((v1, _), (v2, _), (v3, _)) in enumerate(zip(
                vpstar,
                approx_vpstar_finite,
                approx_vpstar
        )):
            states = self.mdp_seq[t].keys()
            v1_arr = np.array([v1[s] for s in states])
            v2_arr = v2.evaluate(states)
            v3_arr = v3.evaluate(states)
            self.assertLess(max(abs(v1_arr - v2_arr)), 0.001)
            self.assertLess(max(abs(v1_arr - v3_arr)), 1.0)
Example #4
    def backward_induction_vf_and_pi(
        self
    ) -> Iterator[Tuple[ValueFunctionApprox[PriceAndShares],
                        DeterministicPolicy[PriceAndShares, int]]]:

        mdp_f0_mu_triples: Sequence[
            Tuple[MarkovDecisionProcess[PriceAndShares, int],
                  ValueFunctionApprox[PriceAndShares],
                  SampledDistribution[NonTerminal[PriceAndShares]]]] = [
                      (self.get_mdp(i), self.func_approx,
                       self.get_states_distribution(i))
                      for i in range(self.time_steps)
                  ]

        num_state_samples: int = 10000
        error_tolerance: float = 1e-6

        return back_opt_vf_and_policy(mdp_f0_mu_triples=mdp_f0_mu_triples,
                                      γ=self.discount_factor,
                                      num_state_samples=num_state_samples,
                                      error_tolerance=error_tolerance)
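
Once the iterator is consumed, each value-function approximation and deterministic policy can be queried state by state. A minimal sketch, assuming these snippets use the RL-book `rl` library (where non-terminal states are wrapped in `NonTerminal` and a policy's `act` returns a distribution over actions); `ooe` (an instance of the order-execution class above) and the `PriceAndShares` state `s` are hypothetical placeholders:

from rl.markov_process import NonTerminal

# Hedged sketch: `ooe` and `s` are hypothetical placeholders.
for t, (vf, pi) in enumerate(ooe.backward_induction_vf_and_pi()):
    state = NonTerminal(s)
    value = vf(state)                # approximate optimal value at time t
    action = pi.act(state).sample()  # greedy number of shares to sell at time t
    print(f"t={t}: V* ≈ {value:.3f}, action = {action}")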