Example #1
File: msp.py  Project: leigao-ceee/msppy
    def discretize(self,
                   n_samples=None,
                   random_state=None,
                   replace=True,
                   n_Markov_states=None,
                   method='SA',
                   n_sample_paths=None,
                   Markov_states=None,
                   transition_matrix=None,
                   int_flag=0):
        """Discretize Markovian continuous uncertainty by k-means or (robust)
        stochasitic approximation.

        Parameters
        ----------
        n_samples: int | array-like, optional, default=None
            Number of i.i.d. samples to generate per stage for the stage-wise
            independent randomness. If array-like, it must have length T and
            its first entry must be 1.

        random_state: None | int | instance of RandomState, optional, default=None
            If int, random_state is the seed used by the
            random number generator;
            If RandomState instance, random_state is the
            random number generator;
            If None, the random number generator is the
            RandomState instance used by numpy.random.

        replace: bool, optional, default=True
            Indicates generating i.i.d. samples with/without replacement for
            stage-wise independent randomness.

        n_Markov_states: list | int, optional, default=None
            If list, it specifies different dimensions of Markov state space
            over time. Length of the list should equal length of the Markovian
            uncertainty.
            If int, it specifies dimensions of Markov state space.
            Note: If the uncertainties are int, trained Markov states will be
            rounded to integers, and duplicates will be removed. In such cases,
            there is no guarantee that the number of Markov states is n_Markov_states.

        method: str, optional, default='SA'
            'input': the approximating Markov chain is given by user input
            (through specifying Markov_states and transition_matrix);
            'SAA': use k-means to train the Markov chain;
            'SA': use stochastic approximation to train the Markov chain;
            'RSA': use robust stochastic approximation to train the Markov chain.

        n_sample_paths: int, optional, default=None
            number of sample paths to train the Markov chain.

        Markov_states/transition_matrix: matrix-like, optional, default=None
            The user-specified approximating Markov chain (required when
            method='input').
        """
        if n_samples is not None:
            if isinstance(n_samples, (numbers.Integral, numpy.integer)):
                if n_samples < 1:
                    raise ValueError("n_samples should be bigger than zero!")
                n_samples = ([1] + [n_samples] * (self.T - 1))
            elif isinstance(n_samples, (abc.Sequence, numpy.ndarray)):
                if len(n_samples) != self.T:
                    raise ValueError(
                        "n_samples list should be of length {} rather than {}!"
                        .format(self.T, len(n_samples)))
                if n_samples[0] != 1:
                    raise ValueError(
                        "The first stage model should be deterministic!")
            else:
                raise ValueError("Invalid input of n_samples!")
            # discretize stage-wise independent continuous distribution
            random_state = check_random_state(random_state)
            for t in range(1, self.T):
                self.models[t]._discretize(n_samples[t], random_state, replace)
        if n_Markov_states is None and method != 'input': return
        if method == 'input' and (Markov_states is None
                                  or transition_matrix is None):
            return
        if n_Markov_states is not None:
            if isinstance(n_Markov_states, (numbers.Integral, numpy.integer)):
                if n_Markov_states < 1:
                    raise ValueError(
                        "n_Markov_states should be bigger than zero!")
                n_Markov_states = ([1] + [n_Markov_states] * (self.T - 1))
            elif isinstance(n_Markov_states, (abc.Sequence, numpy.ndarray)):
                if len(n_Markov_states) != self.T:
                    raise ValueError(
                        "n_Markov_states list should be of length {} rather than {}!"
                        .format(self.T, len(n_Markov_states)))
                if n_Markov_states[0] != 1:
                    raise ValueError(
                        "The first stage model should be deterministic!")
            else:
                raise ValueError("Invalid input of n_Markov_states!")
        from msppy.discretize import Markovian
        if method in ['RSA', 'SA', 'SAA']:
            markovian = Markovian(
                f=self.Markovian_uncertainty,
                n_Markov_states=n_Markov_states,
                n_sample_paths=n_sample_paths,
                int_flag=int_flag,
            )
            self.Markov_states, self.transition_matrix = getattr(
                markovian, method)()
        elif method == 'input':
            dim_Markov_states, n_Markov_states = (
                check_Markov_states_and_transition_matrix(
                    Markov_states=Markov_states,
                    transition_matrix=transition_matrix,
                    T=self.T,
                ))
            if dim_Markov_states != self.dim_Markov_states:
                raise ValueError(
                    "The dimension of the given sample path " +
                    "generator is not the same as the given Markov chain " +
                    "approximation!")
            self.Markov_states = Markov_states
            self.transition_matrix = [
                numpy.array(item) for item in transition_matrix
            ]
        self._flag_discrete = 1
        self.n_Markov_states = n_Markov_states
        if method in ['RSA', 'SA', 'SAA']:
            return markovian
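
For reference, a minimal usage sketch of the discretize API documented above. The model object msp, the chain sizes, sample counts, and seed are illustrative assumptions, not values taken from the source; Markov_states and transition_matrix in the second call stand for user-supplied data.

# Usage sketch (msp is a hypothetical Markovian MSLP model built elsewhere).
# Train a 10-state-per-stage Markov chain by stochastic approximation and
# sample 100 realizations per stage of the stage-wise independent randomness.
markovian = msp.discretize(
    n_samples=100,
    n_Markov_states=10,
    method='SA',
    n_sample_paths=10000,
    random_state=42,
)

# Alternatively, supply a pre-computed Markov chain directly.
msp.discretize(
    method='input',
    Markov_states=Markov_states,          # list of T per-stage state arrays
    transition_matrix=transition_matrix,  # list of T transition matrices
)
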
Example #2
import time

import numpy
from msppy.discretize import Markovian

# sigma, gamma, exp_mu, n_Markov_states, n_sample_paths and method are assumed
# to be defined earlier in the original script; they are not shown in this excerpt.

T = 120
inflow_initial = numpy.array(
    [[41248.7153, 7386.860854, 10124.56146, 6123.808537]])


def generator(random_state, size):
    """Simulate `size` sample paths of the 4-dimensional inflow process over T
    stages, using a multiplicative first-order autoregressive model with
    month-of-year (period-12) parameters and lognormal noise."""
    inflow = numpy.empty([size, T, 4])
    inflow[:, 0, :] = inflow_initial
    for t in range(T - 1):
        noise = numpy.exp(
            random_state.multivariate_normal(mean=[0] * 4,
                                             cov=sigma[t % 12],
                                             size=size))
        inflow[:, t + 1, :] = noise * ((1 - gamma[t % 12]) * exp_mu[t % 12] +
                                       gamma[t % 12] * exp_mu[t % 12] / exp_mu[
                                           (t - 1) % 12] * inflow[:, t, :])
    return inflow


start = time.time()
markovian = Markovian(
    f=generator,
    n_Markov_states=[1] + [n_Markov_states] * (T - 1),
    n_sample_paths=n_sample_paths,
)
Markov_states, transition_matrix = getattr(markovian, method)()
markovian.write("./{}/".format(method + str(n_Markov_states)))
print(time.time() - start)
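
The trained chain is returned as a list of per-stage state arrays together with a list of per-stage transition matrices. A quick consistency check might look like the sketch below; the expected layout (length-T lists, row-stochastic matrices) is an assumption based on the arguments above, not output shown in the source.

# Sanity-check the trained chain (layout assumed, not taken from the source).
assert len(Markov_states) == T and len(transition_matrix) == T
for t in range(T):
    # each stage's transition matrix should be row-stochastic
    assert numpy.allclose(numpy.sum(transition_matrix[t], axis=1), 1.0)
    # one row per Markov state, one column per inflow dimension
    print(t, numpy.asarray(Markov_states[t]).shape)
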
Example #3
def test_RSA():
    m = Markovian(f, n_Markov_states, 5)
    n = Markovian(g, n_Markov_states, 5)
    m.RSA()
    n.RSA()
Example #4
def test_SAA():
    m = Markovian(f, n_Markov_states, 5)
    n = Markovian(g, n_Markov_states, 5)
    m.SAA()
    n.SAA()
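
The plain stochastic-approximation variant can be exercised the same way. Below is a sketch of an analogous test, assuming the same f, g and n_Markov_states fixtures used by the tests above; it is not part of the original test suite.

def test_SA():
    m = Markovian(f, n_Markov_states, 5)
    n = Markovian(g, n_Markov_states, 5)
    m.SA()
    n.SA()
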
Example #5
# augmented Markovian process generator
def generator_augmented(random_state, size):
    # (r_it, r_Mt, epsilon_Mt, sigma^2_Mt)
    process = generator(random_state, size)
    market_return = process[:,:,0]
    process_aug = numpy.concatenate(
        (beta*(market_return[:,:,numpy.newaxis]-rf) + rf,process),
        axis=-1,
    )
    return process_aug
# Markov chain discretization
sample_paths = generator(numpy.random.RandomState(0),size=1000)
return_sample_paths = sample_paths[:,:,0]
var_sample_paths = sample_paths[:,:,2]
price_sample_paths = numpy.cumprod(numpy.exp(return_sample_paths),axis=1)
markovian = Markovian(generator,n_Markov_states=[1]+[100]*(T-1),n_sample_paths=1000000)
markovian.SA()
# augment to 103 dimensions
Markov_states = [None for _ in range(T)]
transition_matrix = markovian.transition_matrix
for t in range(T):
    market_return = markovian.Markov_states[t][:,0].reshape(-1,1)
    asset_return_market_exposure = beta*(market_return-rf) + rf
    Markov_states[t] = numpy.concatenate(
        (asset_return_market_exposure,markovian.Markov_states[t]), axis=1)
# comparison of the true process vs the Markov chain approximation
fig, ax = plt.subplots(1,2,figsize=(10,5),sharey=True)
fig = fan_plot(return_sample_paths, ax=ax[0])
s = markovian.simulate(1000)
fig = fan_plot(s[:,:,-3], ax=ax[1])
ax[0].set_xlabel("stages")