Example #1
def test_bidirectional_simulator(self):
    simulator = BidirectionalSimulator(UncertaintySystem([self.model]), self.timeline, 10000)
    # go_back() returns False once the first date has been stepped past.
    while simulator.go_back():
        pass
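
For context, here is a minimal sketch of the simulator interface the test above exercises. The names (BidirectionalSimulator, tid, nb_simulations, go_end, go_back) come from the snippets on this page; the bodies below are an assumption about their behavior, not the library's actual implementation.

class BidirectionalSimulator:
    # Hypothetical sketch of the bidirectional simulator interface.
    def __init__(self, uncertainty_system, timeline, nb_simulations, antithetic=False):
        self.timeline = list(timeline)
        self.nb_simulations = nb_simulations
        self.tid = 0  # index of the current date within the timeline

    def go_end(self):
        # Position the simulator at the last date of the timeline.
        self.tid = len(self.timeline) - 1

    def go_back(self):
        # Step one date back; return False once the first date is reached.
        if self.tid == 0:
            return False
        self.tid -= 1
        return True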
from collections import defaultdict

import numpy as np


def optimize(optional_asset, market_assumptions, nb_simulations=1000, regression=polynomial_regression):

    discount_factor = market_assumptions.discount_factor
    exercise_times = list(optional_asset.exercise_times)
    simulation_timeline = sorted(set([0.0] + exercise_times))

    # Create the simulator; the optimization walks the timeline backward.
    # TODO: only simulate the required uncertainties.
    simulator = BidirectionalSimulator(
        market_assumptions.uncertainty_system, simulation_timeline, nb_simulations, antithetic=True
    )
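    # Jump to the final date of the timeline; the backward sweep then
    # rewinds one date at a time with go_back().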
    simulator.go_end()

    bellman_values_shape = (1, simulator.nb_simulations)
    next_bellman_values = defaultdict(lambda: np.zeros(bellman_values_shape))
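    # next_bellman_values maps an asset state to its value vector one exercise
    # date later; states not yet seen default to a zero vector.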

    # Set terminal values.
    # For a given date t, conditional_expectancy_store holds the expected
    # value of the asset just after t, conditional on the state of the
    # asset at that point.
    conditional_expectancy_store = ConditionalExpectancyStore()

    nb_exercises = len(exercise_times) - 1
    t_final = simulation_timeline[-1]
    # Considering the states accessible at the last exercise date...
    for asset_state in optional_asset.get_accessible_states(nb_exercises - 1):
        # ...we visit every state reachable at the final date:
        for command in asset_state.get_commands(t_final):
            final_state = command.dest_state
            conditional_expectancy_store.set(simulator.tid, final_state, final_state.get_final_value(simulator.tid))
    bellman_values = np.zeros(bellman_values_shape)

    # backward optimization
    for t in exercise_times[::-1]:
        discount_factor_t = discount_factor(t)
        cur_bellman_values = defaultdict(lambda: np.zeros(bellman_values_shape))
        t_id = simulator.tid
        # TODO: distinguish the simulation timeline from the asset timeline.
        for asset_state in optional_asset.get_accessible_states(t_id):
            # Pick, per simulation, the optimal command from the regressed
            # conditional expectations (the estimated continuation values,
            # not the realized ones, to avoid foresight bias).
            optimal_command_id = compute_optimal_command(
                asset_state, simulator.state, conditional_expectancy_store, discount_factor
            )
            commands = asset_state.get_commands(t)
            bellman_values_if_command = np.zeros((len(commands), simulator.nb_simulations))
            for command_id, command in enumerate(commands):
                bellman_values_if_command[command_id : command_id + 1, :] = (
                    command.payoff(simulator.state) * discount_factor_t + next_bellman_values[command.dest_state]
                )
            # Realized value of each path under the optimal command.
            bellman_values = bellman_values_if_command[optimal_command_id, np.arange(simulator.nb_simulations)].reshape(
                1, simulator.nb_simulations
            )
            cur_bellman_values[asset_state] = bellman_values
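            # Regress the realized values on the Markov state observed at the
            # previous date; the fitted function estimates the continuation
            # value used at the preceding exercise date.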
            regression_information = asset_state.extract_markov_state(simulator.previous)
            conditional_expectancy = compose(
                regression(regression_information, bellman_values), optional_asset.extract_markov_state
            )
            conditional_expectancy_store.set(t_id - 1, asset_state, conditional_expectancy)

        next_bellman_values = cur_bellman_values
        simulator.go_back()

    mtm = MonteCarloEstimation(bellman_values, antithetic=True)

    return OptimizedAsset(optional_asset, conditional_expectancy_store, mtm)
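
Neither ConditionalExpectancyStore, compose, nor the default polynomial_regression are shown on this page. Below is a minimal, self-contained sketch of what they could look like, under the assumption that the store is keyed by (date index, asset state) and that the regression returns a fitted callable. Only the names come from the code above; the bodies are guesses.

import numpy as np

class ConditionalExpectancyStore:
    # Hypothetical sketch: maps (t_id, asset_state) to a conditional
    # expectation (a value or a callable), as used by optimize() above.
    def __init__(self):
        self._store = {}

    def set(self, t_id, asset_state, conditional_expectancy):
        self._store[(t_id, asset_state)] = conditional_expectancy

    def get(self, t_id, asset_state):
        return self._store[(t_id, asset_state)]

def compose(f, g):
    # Hypothetical sketch: right-to-left composition, compose(f, g)(x) == f(g(x)).
    return lambda x: f(g(x))

def polynomial_regression(explanatory, values, degree=2):
    # Hypothetical sketch: least-squares polynomial fit of values against the
    # explanatory variable; returns the fitted function.
    coeffs = np.polyfit(np.ravel(explanatory), np.ravel(values), degree)
    return np.poly1d(coeffs)

With these definitions, the conditional_expectancy built in the backward loop is simply fitted_polynomial(extract_markov_state(simulation_state)), i.e. an estimate of the continuation value as a function of the observable Markov state.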