Example #1
def generate_shocks(trans_mat, N, T):
    np.random.seed(int(round(tm.time())))
    # np.random.seed(0)

    agg_trans_mat = p_agg(trans_mat)
    emp_trans_mat = trans_mat / np.kron(agg_trans_mat, np.ones((2, 2)))

    mc = qe.MarkovChain(agg_trans_mat)
    agg_shocks = mc.simulate(ts_length=T, init=0)
    emp_shocks = np.zeros((T, N))

    draw0 = np.random.uniform(size=N)
    emp_shocks[0, :] = draw0 > ur_b

    # generate idiosyncratic shocks for all agents starting in second period
    draws = np.random.uniform(size=(T - 1, N))
    for t in range(1, T):
        curr_emp_trans_mat = emp_trans_mat[2 * agg_shocks[t - 1]:2 *
                                           agg_shocks[t - 1] + 2, 2 *
                                           agg_shocks[t]:2 * agg_shocks[t] + 2]
        curr_emp_trans_probs = np.where(emp_shocks[t - 1, :] == 0.0,
                                        curr_emp_trans_mat[0, 0],
                                        curr_emp_trans_mat[1, 1])
        emp_shocks[t, :] = np.where(curr_emp_trans_probs > draws[t - 1, :],
                                    emp_shocks[t - 1, :],
                                    1 - emp_shocks[t - 1, :])

    return emp_shocks, agg_shocks
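Example #1 leans on two module-level names that are not shown: `ur_b`, the unemployment rate in the bad aggregate state, and `p_agg`, which collapses the 4x4 joint (aggregate, employment) transition matrix to the 2x2 aggregate-state matrix (it also assumes `import time as tm`, `numpy as np`, and `quantecon as qe`). A minimal sketch of what they plausibly look like, mirroring the aggregate-matrix construction in Example #11 below; the value of ur_b is an assumed placeholder:

import numpy as np

ur_b = 0.10  # assumed: unemployment rate in the bad aggregate state

def p_agg(trans_mat):
    # Collapse the 4x4 joint matrix to a row-stochastic 2x2 aggregate matrix:
    # the row sum of each 2x2 employment block gives the aggregate transition probability
    p = np.empty((2, 2))
    p[0, 0] = trans_mat[0, 0] + trans_mat[0, 1]
    p[0, 1] = 1 - p[0, 0]
    p[1, 1] = trans_mat[2, 2] + trans_mat[2, 3]
    p[1, 0] = 1 - p[1, 1]
    return p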
Example #2
    def __init__(self, class_ids, classes=None, mask=None, fill_empty_classes=False):
        if classes is not None:
            self.classes = classes
        else:
            self.classes = np.unique(class_ids)

        k = len(self.classes)
        class_ids = np.asarray(class_ids)  # no copy if already an ndarray
        transitions = np.zeros((k, k))
        b = 1 if mask is None else mask.ravel()
        for ii in range(len(class_ids) - 1):
            np.add.at(transitions, (class_ids[ii].ravel(), class_ids[ii + 1].ravel()), b)
        self.transitions = transitions
        self.p = transitions / np.clip(transitions.sum((-1), keepdims=True), a_min=1, a_max=None)
        if fill_empty_classes:
            self.p = fill_empty_diagonals(self.p)

        p_tmp = self.p
        p_tmp = fill_empty_diagonals(p_tmp)
        markovchain = qe.MarkovChain(p_tmp)
        self.num_cclasses = markovchain.num_communication_classes
        self.num_rclasses = markovchain.num_recurrent_classes

        self.cclasses_indices = markovchain.communication_classes_indices
        self.rclasses_indices = markovchain.recurrent_classes_indices
        transient = set(list(map(tuple, self.cclasses_indices))).difference(
            set(list(map(tuple, self.rclasses_indices)))
        )
        self.num_tclasses = len(transient)
        if len(transient):
            self.tclasses_indices = [np.asarray(i) for i in transient]
        else:
            self.tclasses_indices = None
        self.astates_indices = list(np.argwhere(np.diag(p_tmp) == 1))
        self.num_astates = len(self.astates_indices)
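The transition counting above hinges on `np.add.at`, which accumulates correctly even when the same (row, column) index pair occurs several times, unlike plain fancy-indexed `+=`. A self-contained illustration on a 1-D sequence of class ids:

import numpy as np

class_ids = np.array([0, 0, 1, 2, 1, 0, 2, 2])
k = 3
counts = np.zeros((k, k))
# Each consecutive pair (class_ids[t], class_ids[t+1]) increments one cell
np.add.at(counts, (class_ids[:-1], class_ids[1:]), 1)
# Row-normalize, guarding empty rows exactly as the constructor above does
p = counts / np.clip(counts.sum(-1, keepdims=True), 1, None)
print(p)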
Example #3
def gen_obs(N_t, PF_old, theta, rc):
    mc = qe.MarkovChain(PPi)
    sim_ix = [0]
    sim_ie0 = [qe.DiscreteRV(ssd).draw(k=1)[0]]
    sim_ie1 = [qe.DiscreteRV(ssd).draw(k=1)[0]]

    for t in range(1, N_t):
        decision = PF_old[sim_ix[t - 1], sim_ie0[t - 1], sim_ie1[t - 1]]
        if decision == 0:
            sim_ix.append(sim_ix[t - 1] + 1)
            sim_ie0.append(mc.simulate(ts_length=2, init=sim_ie0[t - 1])[1])
            sim_ie1.append(qe.DiscreteRV(ssd).draw(k=1)[0])
        else:
            sim_ix.append(0)
            sim_ie0.append(qe.DiscreteRV(ssd).draw(k=1)[0])
            sim_ie1.append(qe.DiscreteRV(ssd).draw(k=1)[0])

    #plt.hist(sim_ix, bins='auto')
    data = collections.Counter(sim_ix)
    data = dict(data)
    #print(data)
    array = np.array(list(data.items()), dtype=int)
    nmax = np.max(array, axis=0)[0]
    addi = np.vstack((np.arange(nmax + 1,
                                N_x + 1), np.zeros(N_x - nmax - 1 + 1))).T
    array = np.append(array, addi, axis=0)
    df = pd.DataFrame(array, columns=['milage', 'obs'])
    df['rep'] = -df['obs'].diff()
    df = df.fillna(0)
    ret_obs = df.rep.values
    ret_obs = ret_obs[1:]
    return ret_obs
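This snippet reads several module-level names it does not define (`PPi`, `ssd`, `N_x`, plus `collections` and `pandas as pd` imports). A hedged guess at their shapes, consistent with how they are used above; all values are placeholders:

import numpy as np
import quantecon as qe

PPi = np.array([[0.7, 0.3],
                [0.4, 0.6]])  # assumed: transition matrix for the persistent shock
ssd = qe.MarkovChain(PPi).stationary_distributions[0]  # assumed: distribution for i.i.d. redraws
N_x = 100  # assumed: number of mileage grid points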
Example #4
def consumption_incomplete(cp, N_simul=150):
    """
    Computes endogenous values for the incomplete market case.

    Parameters
    ----------

    cp : instance of ConsumptionProblem
    N_simul : int

    """

    beta, P, y, b0 = cp.beta, cp.P, cp.y, cp.b0  # Unpack
    # For the simulation define a quantecon MC class
    mc = qe.MarkovChain(P)

    # Useful variables
    y = np.asarray(y).reshape(2, 1)
    v = np.linalg.inv(np.eye(2) - beta * P) @ y

    # Simulate state path
    s_path = mc.simulate(N_simul, init=0)

    # Store consumption and debt path
    b_path, c_path = np.ones(N_simul + 1), np.ones(N_simul)
    b_path[0] = b0

    # Optimal decisions from (12) and (13)
    db = ((1 - beta) * v - y) / beta

    for i, s in enumerate(s_path):
        c_path[i] = (1 - beta) * (v - b_path[i] * np.ones((2, 1)))[s, 0]
        b_path[i + 1] = b_path[i] + db[s, 0]

    return c_path, b_path[:-1], y[s_path], s_path
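A minimal usage sketch, assuming `ConsumptionProblem` is a plain container exposing the four attributes the function unpacks (`beta`, `P`, `y`, `b0`); the container and values below are hypothetical:

from collections import namedtuple
import numpy as np

ConsumptionProblem = namedtuple('ConsumptionProblem', ['beta', 'P', 'y', 'b0'])
cp = ConsumptionProblem(beta=0.96,
                        P=np.array([[0.8, 0.2],
                                    [0.4, 0.6]]),
                        y=[1.0, 2.0],  # endowment in each state
                        b0=3.0)        # initial debt
c_path, b_path, y_path, s_path = consumption_incomplete(cp, N_simul=150)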
Example #5
 def __init__(self, p, N, epsilon, mode="seq", init=0, sample=10000):
     self.p = kmr_markov_matrix(p, N, epsilon, mode)
     self.epsilon = epsilon
     self.mc = qe.MarkovChain(self.p)
     self.state = self.sample_path  # note: relies on a sample_path attribute defined elsewhere in the class
     self.N = N
     self.init = init
     self.sample = sample
Example #6
File: kmr.py Project: myuuuuun/KMR
 def __init__(self, profits, num_players, epsilon):
     self.num_players = num_players
     self.num_actions = 2 if isinstance(profits, int) else len(profits)
     self.num_states = sc.special.comb(self.num_players + self.num_actions - 1, self.num_actions - 1, exact=True)  # scipy.misc.comb was removed from SciPy; scipy.special.comb is the replacement
     self.state_players = make_state_players(num_players, self.num_actions)
     self.profit_matrix = make_profit_matrix(profits)
     self.transition_matrix = kmr_markov_matrix(profits, num_players, self.num_actions, epsilon)
     self.mc = qe.MarkovChain(self.transition_matrix)
Example #7
def generate_types_shocks(trans_mat, N, T):
    #np.random.seed(int(round(tm.time())))
    #np.random.seed(0)

    mc = qe.MarkovChain(trans_mat, ['L', 'M', 'H'])
    types_shocks = mc.simulate(ts_length=T * N).reshape(T, N)

    stat_dist = mc.stationary_distributions

    return types_shocks, stat_dist
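Note that reshaping one path of length T * N into (T, N) fills row-major, so each row holds N consecutive draws of a single chain and the columns are not independent agents. If N independent paths are wanted, `MarkovChain.simulate` accepts `num_reps`; a sketch reusing the names above:

types_shocks = mc.simulate(ts_length=T, num_reps=N).T  # (num_reps, ts_length) -> (T, N)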
Example #8
def get_stationary_states(treatment):
    """
    input: treatment list of stochastic numpy arrays
    output: list of numpy arrays; each array is a group's stationary distribution
    """
    stationary_distributions = []
    for matrix in treatment:
        mc = qe.MarkovChain(matrix)
        s = mc.stationary_distributions
        stationary_distributions.append(s)
    return (stationary_distributions)
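A quick usage sketch with two hypothetical 2x2 stochastic matrices; `stationary_distributions` returns a 2-D array, one row per stationary distribution:

import numpy as np

treatment = [np.array([[0.9, 0.1],
                       [0.5, 0.5]]),
             np.array([[0.5, 0.5],
                       [0.2, 0.8]])]
dists = get_stationary_states(treatment)
print(dists[0])  # [[0.83333333 0.16666667]]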
Example #9
def sojourn_time(p, summary=True):
    """
    Calculate sojourn time based on a given transition probability matrix.
    Parameters
    ----------
    p        : array
               (k, k), a Markov transition probability matrix.
    summary  : bool
               If True and the Markov Chain has absorbing states whose
               sojourn time is infinitely large, print out the information
               about the absorbing states. Default is True.
    Returns
    -------
             : array
               (k, ), sojourn times. Each element is the expected time a Markov
               chain spends in each state before leaving that state.
    Notes
    -----
    Refer to :cite:`Ibe2009` for more details on sojourn times for Markov
    chains.
    Examples
    --------
    >>> from giddy.markov import sojourn_time
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> sojourn_time(p)
    array([2., 1., 2.])
    Non-ergodic Markov chains with rows full of 0s:
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5],[ 0, 0, 0]])
    >>> sojourn_time(p)
    Sojourn times are infinite for absorbing states! In this Markov Chain, states [2] are absorbing states.
    array([ 2.,  1., inf])
    """

    p = np.asarray(p)
    if (p.sum(axis=1) == 0).sum() > 0:
        p = fill_empty_diagonals(p)

    markovchain = qe.MarkovChain(p)
    pii = p.diagonal()

    if not (1 - pii).all():
        absorbing_states = np.where(pii == 1)[0]
        non_absorbing_states = np.where(pii != 1)[0]
        st = np.full(len(pii), np.inf)
        if summary:
            print("Sojourn times are infinite for absorbing states! In this "
                  "Markov Chain, states {} are absorbing states.".format(
                      list(absorbing_states)))
        st[non_absorbing_states] = 1 / (1 - pii[non_absorbing_states])
    else:
        st = 1 / (1 - pii)
    return st
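The closed form st = 1 / (1 - p_ii) is just the mean of the geometric number of periods the chain stays put. A quick empirical check by simulation (a sketch, using the doctest matrix above):

import numpy as np
import quantecon as qe

p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
x = qe.MarkovChain(p).simulate(ts_length=200_000, random_state=0)

mask = x[:-1] == 0                   # periods currently in state 0
p00_hat = np.mean(x[1:][mask] == 0)  # empirical stay probability
print(1 / (1 - p00_hat))             # approaches sojourn_time(p)[0] == 2.0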
Example #10
def generate_types_shocks_stat_shares(trans_mat, N, T, range_ratio=0.1):
    types_values = ['L', 'M', 'H']

    mc = qe.MarkovChain(trans_mat)
    stat_dist = mc.stationary_distributions[0]

    nshares = [int(N * j) for j in stat_dist]
    vals_nshares_dict = dict(zip(types_values, nshares))

    t0_states = np.concatenate([
        np.repeat('L', int(vals_nshares_dict['L'])),
        np.repeat('M', int(vals_nshares_dict['M'])),
        np.repeat('H', int(vals_nshares_dict['H']))
    ])
    types_shocks = []
    for i in range(T):
        shares_requirement = False
        while not shares_requirement:

            prob_array = np.random.rand(
                int(vals_nshares_dict['L']) + int(vals_nshares_dict['M']) +
                int(vals_nshares_dict['H']))
            tuples_array = list(zip(t0_states, prob_array))

            t1_states = [
                markov_one_step(c, p, trans_mat) for c, p in tuples_array
            ]
            unique, counts = np.unique(t1_states, return_counts=True)
            realized_shares = dict(zip(unique, counts))

            # Use .get with a default of 0 so a type that dies out in t1_states
            # does not raise a KeyError; the loop then simply redraws.
            condL = ((realized_shares.get('L', 0) <=
                      (1 + range_ratio) * vals_nshares_dict['L']) &
                     (realized_shares.get('L', 0) >=
                      (1 - range_ratio) * vals_nshares_dict['L']))
            condM = ((realized_shares.get('M', 0) <=
                      (1 + range_ratio) * vals_nshares_dict['M']) &
                     (realized_shares.get('M', 0) >=
                      (1 - range_ratio) * vals_nshares_dict['M']))
            condH = ((realized_shares.get('H', 0) <=
                      (1 + range_ratio) * vals_nshares_dict['H']) &
                     (realized_shares.get('H', 0) >=
                      (1 - range_ratio) * vals_nshares_dict['H']))

            if condL & condM & condH:
                types_shocks.append(t1_states)
                t0_states = t1_states
                shares_requirement = True

    return np.array(types_shocks), stat_dist
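`markov_one_step` is not defined in this snippet. A plausible implementation, assuming states are ordered ('L', 'M', 'H') to match the rows of trans_mat and that p is a uniform [0, 1) draw (this helper is a hypothetical reconstruction):

import numpy as np

def markov_one_step(current_state, p, trans_mat, states=('L', 'M', 'H')):
    # Advance one step: the next state is the first one whose cumulative
    # transition probability exceeds the uniform draw p
    row = trans_mat[states.index(current_state)]
    return states[np.searchsorted(np.cumsum(row), p)]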
Example #11
def generate_shocks0(trans_mat, N, T):

    prob = trans_mat

    ag_shock = np.zeros((T, 1))
    id_shock = np.zeros((T, N))
    np.random.seed(0)

    # ag_shock = np.zeros((T, 1))
    # Transition probabilities between aggregate states
    prob_ag = np.zeros((2, 2))
    prob_ag[0, 0] = prob[0, 0] + prob[0, 1]
    prob_ag[1, 0] = 1 - prob_ag[0, 0]  # bad state to good state
    prob_ag[1, 1] = prob[2, 2] + prob[2, 3]
    prob_ag[0, 1] = 1 - prob_ag[1, 1]

    P = prob / np.kron(prob_ag, np.ones((2, 2)))
    # generate aggregate shocks
    mc = qe.MarkovChain(prob_ag)
    ag_shock = mc.simulate(ts_length=T, init=0)  # start from bad state
    # generate idiosyncratic shocks for all agents in the first period
    draw = np.random.uniform(size=N)
    id_shock[0, :] = draw > ur_b  # employed (1) if the uniform draw exceeds the bad-state unemployment rate ur_b

    # generate idiosyncratic shocks for all agents starting in second period
    draw = np.random.uniform(size=(T - 1, N))
    for t in range(1, T):
        # Fix idiosyncratic transition matrix conditional on the aggregate state
        transition = P[2 * ag_shock[t - 1]:2 * ag_shock[t - 1] + 2,
                       2 * ag_shock[t]:2 * ag_shock[t] + 2]
        transition_prob = [
            transition[int(id_shock[t - 1, i]),
                       int(id_shock[t - 1, i])] for i in range(N)
        ]
        # True where the agent remains in the current employment state
        check = transition_prob > draw[t - 1, :]
        id_shock[t, :] = id_shock[t - 1, :] * check + (
            1 - id_shock[t - 1, :]) * (1 - check)

    return id_shock, ag_shock
Example #12
def simulate_mc(treatment, n_iter=None, reps=None):
    """
    input: treatment list of stochastic numpy arrays
    output: list of simulated chains for each group
    bugs:
        - reps > 1 breaks the list of states
    """
    N = n_iter
    simulations = []
    for matrix in treatment:
        # run simulations
        mc = qe.MarkovChain(matrix)
        X = mc.simulate(ts_length=N, num_reps=reps)
        # get fraction of time in each state
        n_states = matrix.shape[0]
        states = range(n_states)
        s = []
        for state in states:
            x = (X == state).cumsum() / (1 + np.arange(N, dtype=float))
            s.append(x)
        simulations.append(s)
    return (simulations)
Example #13
                   x0=x0,
                   policy_name='Input high Policy')

    ## Plot policy function
    plot_policy_3d(policy=g_m2,
                   save=save,
                   x0=x0,
                   policy_name='Input low Policy')

#%%
# =============================================================================
# Compute stationary distribution
# =============================================================================
T = 1000
N = 1000
mc_θ = qe.MarkovChain(pi_θ)
mc_ε = qe.MarkovChain(pi_ε)

# == Compute the stationary distribution of Pi == #
θ_0 = mc_θ.stationary_distributions[0]
ε_0 = mc_ε.stationary_distributions[0]

θ_h_0 = θ_0[1]  # proportion of people in the good (high) rainfall state at t=0
ε_h_0 = ε_0[1]  # proportion of people in the good (high) basis-risk state at t=0


def invariant_distr(pi_θ, pi_ε, T=1000, N=1000, θ_h_0=0.5, ε_h_0=0.5):
    θ_shock = np.empty(T * N).reshape(T, N)
    ε_shock = np.empty(T * N).reshape(T, N)
    m1_state = np.empty(T * N).reshape(T, N)
Example #14
                i)] += matrix_of_probabilities[-1][j] * p[j][data.index(i)]
        matrix_of_probabilities.append(new)
        data_of_new_seq.append(sum(matrix_of_probabilities[-1]))
    write_to_csw([data_of_new_seq], nameFile)

    #print(matrix_of_probabilities)
    #print('probabiliti = ',sum(matrix_of_probabilities[-1]))


m1 = [tv, tn, bl, bp]
m2 = ['tv', 'tn', 'bl', 'bp']
for j in range(100):
    #sequence = generate_sequance(data, N)
    vector_of_probabilities = generate_vector_of_probabilities(len(data), N)
    for i in range(4):
        mc1 = qe.MarkovChain(m1[i])
        sequence = mc1.simulate(N, random_state=None)
        for k in range(4):
            run(m1[k], m2[k], sequence, j + i, vector_of_probabilities)

print('done')

#p1 = ([
#     [0.1, 0.5, 0.4],
#     [0.3, 0.3, 0.4],
#     [0.8, 0.1, 0.1]])
#
#p2 = ([
#     [0.1, 0.5, 0.4],
#     [0.3, 0.3, 0.4],
#     [0.8, 0, 0.2]])
Example #15
 def __init__(self, p, N, epsilon):
     P = kmr_markov_matrix(p, N, epsilon)
     self.mc = qe.MarkovChain(P)
Example #16
M[0,2] = 1/4*np.exp(-1/T)
M[1,2] = 1/4
M[2,2] = 2/4*(1-np.exp(-1/T))
M[3,2] = 1/4
M[4,2] = 1/4*np.exp(-1/T)

print(np.sum(M[:,2])) 


M[0,3] = 1/4*np.exp(-2/T)
M[1,3] = 1/4
M[2,3] = 1/4*np.exp(-1/T)
M[3,3] = 1/4*(1-np.exp(-1/T)) + 2/4*(1-np.exp(-2/T))
M[4,3] = 1/4*np.exp(-2/T)

print(np.sum(M[:,3]))

M[0,4] = 1/4 
M[1,4] = 1/4
M[2,4] = 1/4 
M[3,4] = 1/4 
M[4,4] = 0

print(np.sum(M[:,4]))


print(M)

mc = qe.MarkovChain(np.transpose(M))
print(mc.stationary_distributions)
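M above is built column-stochastic (each printed column sum should be 1), which is why it is transposed before being passed to `qe.MarkovChain`, whose convention is row-stochastic. A quick sanity check reusing M and mc from above: for a column-stochastic M the stationary vector satisfies M @ pi = pi.

pi = mc.stationary_distributions[0]
print(np.allclose(M @ pi, pi))  # expected: True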
Example #17
P = ((0.971, 0.029, 0.000),  # Hamilton's growth/recession transition matrix
     (0.145, 0.778, 0.077),
     (0.000, 0.508, 0.492))
P = np.array(P)

psi = (0.0, 0.2, 0.8)        # Initial condition

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
ax.set_xticks((0.25, 0.5, 0.75))
ax.set_yticks((0.25, 0.5, 0.75))
ax.set_zticks((0.25, 0.5, 0.75))

x_vals, y_vals, z_vals = [], [], []
for t in range(20):
    x_vals.append(psi[0])
    y_vals.append(psi[1])
    z_vals.append(psi[2])
    psi = np.dot(psi, P)

ax.scatter(x_vals, y_vals, z_vals, c='r', s=60)

mc = qe.MarkovChain(P)
psi_star = mc.stationary_distributions[0]
ax.scatter(psi_star[0], psi_star[1], psi_star[2], c='k', s=60)

plt.show()
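After the 20 updates of psi above, the iterate is already close to psi_star; a one-line check reusing those names:

print(np.linalg.norm(psi - psi_star))  # distance shrinks toward 0 as t grows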
Example #18
File: KMR.py Project: snowdj/Game-Theory
 def __init__(self, p, N, epsilon, simultaneous=False):
     self.p = p
     self.N = N
     self.epsilon = epsilon
     self.P = kmr_markov_matrix(p, N, epsilon, simultaneous)
     self.mc = qe.MarkovChain(self.P)
Example #19
#Define parameter values.

beta = 0.95  #Discount factor
delta = 0.4  #Rate of depreciation
alpha = 0.675  #share of labor in the production function.
h = 0.5  #inelastic labor supply

nu = 2.3  #nu and omega are parameters governing the disutility
omega = 5  #of labor

#Step 1: Create and simulate the Markov chain for the gamma parameter.
prob_gamma = [[0.2, 0.8, 0, 0, 0], [0.5, 0, 0.5, 0, 0], [0, 0.5, 0, 0.5, 0],
              [0, 0, 0.5, 0, 0.5], [0, 0, 0, 0.95, 0.05]]

state_values_gamma = (3, 2.5, 2, 1.5, 1)  #5 states
mc = qe.MarkovChain(prob_gamma, state_values=state_values_gamma)
inestab = mc.simulate(ts_length=80,
                      init=state_values_gamma[0])  #simulate it for 80 periods

#Add periods of stability
stab1 = np.ones(20) * state_values_gamma[0]
stab2 = np.ones(50) * state_values_gamma[0]
gamma = np.concatenate((stab1, inestab, stab2), axis=0)

#Define some functions to compute the main variables of the model (which depend on k)


def y(kt, gamma):
    return kt**(1 - alpha) * (gamma * h)**alpha

Example #20
def simulate(model, J, ext_default_states, dom_default_states, tot_default_states, ib_d_rep_star, ib_f_rep_star, ib_d_fd_star, ib_f_dd_star, q_d_r, q_d_fd, q_f_r, q_f_dd, y_init=None, b_d_init=None, b_f_init=None):
    """
    Simulates the Selective Default model of sovereign debt

    Parameters
    ----------
    model: Selective_Economy
        An instance of the Selective Default model with the corresponding parameters
    J: integer
        The number of periods that the model should be simulated
    ext_default_states: array(float64, 2)
        A matrix of 0s and 1s that denotes whether the country was in
        external default on their debt in that period (ext_default = 1)
    dom_default_states: array(float64, 2)
        A matrix of 0s and 1s that denotes whether the country was in
        domestic default on their debt in that period (dom_default = 1)
    tot_default_states: array(float64, 2)
        A matrix of 0s and 1s that denotes whether the country was in
        total default on their debt in that period (ext_default & dom_default = 1)
    ib_f_rep_star: array(float64, 2)
        A matrix which specifies the external debt/savings level that a country holds
        during a given state in case of REPAYMENT
    ib_d_rep_star: array(float64, 2)
        A matrix which specifies the domestic debt/savings level that a country holds
        during a given state in case of REPAYMENT
    ib_f_dd_star: array(float64, 2)
        A matrix which specifies the external debt/savings level that a country holds
        during a given state in case of DOMESTIC DEFAULT
    ib_d_fd_star: array(float64, 2)
        A matrix which specifies the DOMESTIC debt/savings level that a country holds
        during a given state in case of FOREIGN DEFAULT
    q_f_r: array(float64, 2)
        A matrix that specifies the price, in case of total repayment, at which a country can borrow/save externally
        for a given state
    q_f_dd: array(float64, 2)
        A matrix that specifies the price, in case of domestic default, at which a country can borrow/save externally
        for a given state
    q_d_r: array(float64, 2)
        A matrix that specifies the price, in case of total repayment, at which a country can borrow/save domestically
        for a given state
    q_d_fd: array(float64, 2)
        A matrix that specifies the price, in case of foreign default, at which a country can borrow/save domestically
        for a given state
    y_init: integer
        Specifies which state the income process should start in
    b_f_init: integer
        Specifies which state the external debt/savings state should start
    b_d_init: integer
        Specifies which state the domestic debt/savings state should start

    Returns
    -------
    y_sim: array(float64, 1)
        A simulation of the country's income
    b_d_sim: array(float64, 1)
        A simulation of the country's domestic debt/savings
    b_f_sim: array(float64, 1)
        A simulation of the country's foreign debt/savings
    q_f_r_sim: array(float64, 1)
        A simulation of the external price, in case of total repayment, required to have an extra unit of
        consumption in the following period
    q_f_dd_sim: array(float64, 1)
        A simulation of the external price, in case of domestic default, required to have an extra unit of
        consumption in the following period
    q_d_r_sim: array(float64, 1)
        A simulation of the domestic price, in case of total repayment, required to have an extra unit of
        consumption in the following period
    q_d_fd_sim: array(float64, 1)
        A simulation of the domestic price, in case of foreign default, required to have an extra unit of
        consumption in the following period
    ext_default_sim: array(bool, 1)
        A simulation of whether the country was in external default or not
    dom_default_sim: array(bool, 1)
        A simulation of whether the country was in domestic default or not
    tot_default_sim: array(bool, 1)
        A simulation of whether the country was in total default or not
    """
    # Find index i such that Bgrid[i] is approximately 0
    zero_b_f_index = np.searchsorted(model.b_f, 0.0)
    zero_b_d_index = np.searchsorted(model.b_d, 0.0)


    # Set initial conditions
    ext_in_default = False
    dom_in_default = False
    tot_in_default = False

    ext_max_y_default = 0.905 * np.mean(model.y)
    dom_max_y_default = 0.955 * np.mean(model.y)
    tot_max_y_default = 0.905 * 0.955 * np.mean(model.y)

    if y_init is None:
        y_init = np.searchsorted(model.y, model.y.mean())
    if b_d_init is None:
        b_d_init = zero_b_d_index
    if b_f_init is None:
        b_f_init = zero_b_f_index

    # Create Markov chain and simulate income process
    mc = qe.MarkovChain(model.P, model.y)
    y_sim_indices = mc.simulate_indices(J + 1, init=y_init)

    # Allocate memory for remaining outputs (the simulation runs for J periods)
    b_di = b_d_init
    b_fi = b_f_init
    b_d_sim = np.empty(J)
    b_f_sim = np.empty(J)
    y_sim = np.empty(J)
    q_d_r_sim = np.empty(J)
    q_d_fd_sim = np.empty(J)
    q_f_r_sim = np.empty(J)
    q_f_dd_sim = np.empty(J)
    dom_default_sim = np.empty(J, dtype=bool)
    ext_default_sim = np.empty(J, dtype=bool)
    tot_default_sim = np.empty(J, dtype=bool)

    # Perform simulation
    for t in range(J):
        yi = y_sim_indices[t]

        # Fill y/B for today, branching on the current default regime
        if not ext_in_default and not dom_in_default:
            y_sim[t] = model.y[yi]
        elif ext_in_default and not dom_in_default:
            y_sim[t] = np.minimum(model.y[yi], ext_max_y_default)
        elif dom_in_default and not ext_in_default:
            y_sim[t] = np.minimum(model.y[yi], dom_max_y_default)
        else:
            y_sim[t] = np.minimum(model.y[yi], tot_max_y_default)

        b_d_sim[t] = model.b_d[b_di]
        b_f_sim[t] = model.b_f[b_fi]

        ext_default_sim[t] = ext_in_default
        dom_default_sim[t] = dom_in_default
        tot_default_sim[t] = tot_in_default

        # Check whether in default and branch depending on that state
        if not ext_in_default and not dom_in_default:
            if ext_default_states[yi, b_di, b_fi] > 1e-4 and dom_default_states[yi, b_di, b_fi] > 1e-4:
                tot_in_default = True
                b_di_next = zero_b_d_index
                b_fi_next = zero_b_f_index
            elif ext_default_states[yi, b_di, b_fi] > 1e-4:
                ext_in_default = True
                b_fi_next = zero_b_f_index
                b_di_next = ib_d_fd_star[yi, b_di, b_fi]
            elif dom_default_states[yi, b_di, b_fi] > 1e-4:
                dom_in_default = True
                b_di_next = zero_b_d_index
                b_fi_next = ib_f_dd_star[yi, b_di, b_fi]
            else:
                b_fi_next = ib_f_rep_star[yi, b_di, b_fi]
                b_di_next = ib_d_rep_star[yi, b_di, b_fi]
        else:
            b_fi_next = zero_b_f_index
            b_di_next = zero_b_d_index
            # Draw each market's re-entry shock once per period so every
            # branch compares against the same realizations
            draw_f = np.random.rand()
            draw_d = np.random.rand()
            if draw_f < model.θ_f and draw_d < model.θ_d:
                tot_in_default = False
                ext_in_default = False
                dom_in_default = False
            elif draw_f < model.θ_f:
                ext_in_default = False
            elif draw_d < model.θ_d:
                dom_in_default = False

        # Fill in states
        q_f_r_sim[t] = q_f_r[yi, b_fi, b_fi_next]
        q_f_dd_sim[t] = q_f_dd[yi, b_fi_next]
        q_d_r_sim[t] = q_d_r[yi, b_di, b_di_next, b_fi, b_fi_next]
        q_d_fd_sim[t] = q_d_fd[yi, b_di, b_di_next]  # index the price array, not the output series
        b_fi = b_fi_next
        b_di = b_di_next

    return y_sim, b_d_sim, b_f_sim, q_d_r_sim, q_d_fd_sim, q_f_r_sim, q_f_dd_sim, ext_default_sim, dom_default_sim, tot_default_sim
Example #21
File: ergodic.py Project: xf37/giddy
def fmpt(P, fill_empty_classes=False):
    """
    Generalized function for calculating first mean passage times for an
    ergodic or non-ergodic transition probability matrix.

    Parameters
    ----------
    P        : array
               (k, k), an ergodic/non-ergodic Markov transition probability
               matrix.
    fill_empty_classes: bool, optional
                        If True, assign 1 to diagonal elements which fall in rows full
                        of 0s to ensure the transition probability matrix is a
                        stochastic one. Default is False.

    Returns
    -------
    fmpt_all : array
               (k, k), elements are the expected value for the number of
               intervals required for a chain starting in state i to first
               enter state j. If i=j then this is the recurrence time.

    Examples
    --------
    >>> import numpy as np
    >>> from giddy.ergodic import fmpt
    >>> np.set_printoptions(suppress=True) #prevent scientific format

    Irreducible Markov chain
    >>> p = np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> fm = fmpt(p)
    >>> fm
    array([[2.5       , 4.        , 3.33333333],
           [2.66666667, 5.        , 2.66666667],
           [3.33333333, 4.        , 2.5       ]])

    Thus, if it is raining today in Oz we can expect a nice day to come
    along in another 4 days, on average, and snow to hit in 3.33 days. We can
    expect another rainy day in 2.5 days. If it is nice today in Oz, we would
    experience a change in the weather (either rain or snow) in 2.67 days from
    today.

    Reducible Markov chain: two communicating classes (this is an
    artificial example)
    >>> p = np.array([[.5, .5, 0],[.2,0.8,0],[0,0,1]])
    >>> fmpt(p)
    array([[3.5, 2. , inf],
           [5. , 1.4, inf],
           [inf, inf, 1. ]])

    Thus, if it is raining today in Oz we can expect a nice day to come
    along in another 2 days, on average, and should not expect snow to hit.
    We can expect another rainy day in 3.5 days. If it is nice today in Oz,
    we should expect a rainy day in 5 days.


    >>> p = np.array([[.5, .5, 0],[.2,0.8,0],[0,0,0]])
    >>> fmpt(p, fill_empty_classes=True)
    array([[3.5, 2. , inf],
           [5. , 1.4, inf],
           [inf, inf, 1. ]])

    >>> p = np.array([[.5, .5, 0],[.2,0.8,0],[0,0,0]])
    >>> fmpt(p, fill_empty_classes=False)
    Traceback (most recent call last):
        ...
    ValueError: Input transition probability matrix has 1 rows full of 0s. Please set fill_empty_classes=True to set diagonal elements for these rows to be 1 to make sure the matrix is stochastic.
    """

    P = np.asarray(P)
    rows0 = (P.sum(axis=1) == 0).sum()
    if rows0 > 0:
        if fill_empty_classes:
            P = fill_empty_diagonals(P)
        else:
            raise ValueError("Input transition probability matrix has "
                             "%d rows full of 0s. Please set "
                             "fill_empty_classes=True to set diagonal "
                             "elements for these rows to be 1 to make "
                             "sure the matrix is stochastic." % rows0)
    mc = qe.MarkovChain(P)
    num_classes = mc.num_communication_classes
    if num_classes == 1:
        fmpt_all = _fmpt_ergodic(P)
    else:  # deal with non-ergodic Markov chains
        k = P.shape[0]
        fmpt_all = np.zeros((k, k))
        for desti in range(k):
            b = np.ones(k - 1)
            p_sub = np.delete(np.delete(P, desti, 0), desti, 1)
            p_calc = np.eye(k - 1) - p_sub
            m = np.full(k - 1, np.inf)
            row0 = (p_calc != 0).sum(axis=1)
            none0 = np.arange(k - 1)
            try:
                m[none0] = np.linalg.solve(p_calc, b)
            except np.linalg.LinAlgError as err:
                if "Singular matrix" in str(err):
                    if (row0 == 0).sum() > 0:
                        index0 = set(np.argwhere(row0 == 0).flatten())
                        x = (p_calc[:, list(index0)] != 0).sum(axis=1)
                        setx = set(np.argwhere(x).flatten())
                        while not setx.issubset(index0):
                            index0 = index0.union(setx)
                            x = (p_calc[:, list(index0)] != 0).sum(axis=1)
                            setx = set(np.argwhere(x).flatten())
                        none0 = np.asarray(list(set(none0).difference(index0)))
                        if len(none0) >= 1:
                            p_calc = p_calc[none0, :][:, none0]
                            b = b[none0]
                            m[none0] = np.linalg.solve(p_calc, b)
            recc = (np.nan_to_num(
                (np.delete(P, desti, 1)[desti] * m), 0, posinf=np.inf).sum() +
                    1)
            fmpt_all[:, desti] = np.insert(m, desti, recc)
            fmpt_all = np.where(fmpt_all < -1e16, np.inf, fmpt_all)
            fmpt_all = np.where(fmpt_all > 1e16, np.inf, fmpt_all)
    return fmpt_all
Example #22
import quantecon as qe
import numpy as np



def mc_sample_path(P, init=0, sample_size=1000):
    P = np.asarray(P)
    X = np.empty(sample_size, dtype=int)
    X[0] = init
    n = len(P)
    P_dist = [qe.DiscreteRV(P[i, :]) for i in range(n)]
    for t in range(sample_size - 1):
        X[t + 1] = P_dist[X[t]].draw()

    return X


if __name__ == "__main__":
    P = [[0.4, 0.6], [0.2, 0.8]]
    # X = mc_sample_path(P, sample_size=1000)

    mc = qe.MarkovChain(P, state_values=("employed", "unemployed"))
    print(mc.simulate(ts_length=4, init="unemployed"))

Example #23
File: ergodic.py Project: xf37/giddy
def steady_state(P, fill_empty_classes=False):
    """
    Generalized function for calculating the steady state distribution
    for a regular or reducible Markov transition matrix P.

    Parameters
    ----------
    P        : array
               (k, k), an ergodic or non-ergodic Markov transition probability
               matrix.
    fill_empty_classes: bool, optional
                        If True, assign 1 to diagonal elements which fall in rows full
                        of 0s to ensure the transition probability matrix is a
                        stochastic one. Default is False.

    Returns
    -------
             : array
               If the Markov chain is irreducible, meaning that
               there is only one communicating class, there is one unique
               steady state distribution towards which the system is
               converging in the long run. Then steady_state is the
               same as _steady_state_ergodic (k, ).
               If the Markov chain is reducible, but only has 1 recurrent
               class, there will be one steady state distribution as well.
               If the Markov chain is reducible and there are multiple
               recurrent classes (num_rclasses), the system could be trapped
               in any one of  these recurrent classes. Then, there will be
               `num_rclasses` steady state distributions. The returned array
               will of (num_rclasses, k) dimension.

    Examples
    --------

    >>> import numpy as np
    >>> from giddy.ergodic import steady_state

    Irreducible Markov chain
    >>> p = np.array([[.5, .25, .25],[.5,0,.5],[.25,.25,.5]])
    >>> steady_state(p)
    array([0.4, 0.2, 0.4])

    Reducible Markov chain: two communicating classes
    >>> p = np.array([[.5, .5, 0],[.2,0.8,0],[0,0,1]])
    >>> steady_state(p)
    array([[0.28571429, 0.71428571, 0.        ],
           [0.        , 0.        , 1.        ]])

    Reducible Markov chain: two communicating classes
    >>> p = np.array([[.5, .5, 0],[.2,0.8,0],[0,0,0]])
    >>> steady_state(p, fill_empty_classes = True)
    array([[0.28571429, 0.71428571, 0.        ],
           [0.        , 0.        , 1.        ]])

    >>> steady_state(p, fill_empty_classes = False)
    Traceback (most recent call last):
        ...
    ValueError: Input transition probability matrix has 1 rows full of 0s. Please set fill_empty_classes=True to set diagonal elements for these rows to be 1 to make sure the matrix is stochastic.

    """

    P = np.asarray(P)
    rows0 = (P.sum(axis=1) == 0).sum()
    if rows0 > 0:
        if fill_empty_classes:
            P = fill_empty_diagonals(P)
        else:
            raise ValueError("Input transition probability matrix has "
                             "%d rows full of 0s. Please set "
                             "fill_empty_classes=True to set diagonal "
                             "elements for these rows to be 1 to make "
                             "sure the matrix is stochastic." % rows0)
    mc = qe.MarkovChain(P)
    num_classes = mc.num_communication_classes
    if num_classes == 1:
        return mc.stationary_distributions[0]
    else:
        return mc.stationary_distributions
Example #24
'''
Compute and plot the stationary distribution of the matrix using one of the
methods in quantecon's MarkovChain object, combined with matplotlib.
'''
###############################################################################

P = [[0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],
     [0.221, 0.220, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],
     [0.207, 0.209, 0.210, 0.194, 0.090, 0.046, 0.036, 0.008],
     [0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.040, 0.009],
     [0.175, 0.178, 0.197, 0.207, 0.110, 0.067, 0.054, 0.012],
     [0.182, 0.184, 0.200, 0.205, 0.106, 0.062, 0.050, 0.011],
     [0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],
     [0.084, 0.084, 0.142, 0.228, 0.170, 0.143, 0.121, 0.028]]

mc = qe.MarkovChain(P, ("1", "2", "3", "4", "5", "6", "7", "8"))
stationary_dist_p = mc.stationary_distributions
print(stationary_dist_p)
plt.plot(list(range(0, len(P[0]))), stationary_dist_p[0], color='r')
plt.scatter(list(range(0, len(P[0]))), stationary_dist_p[0])
plt.title(r"Stationary Distribution of $P$")
plt.ylabel("Probability")
plt.xlabel("State")
plt.show()
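Since P is typed by hand, a cheap row-sum check before (or after) building the chain helps catch typos:

import numpy as np

assert np.allclose(np.asarray(P).sum(axis=1), 1.0), "every row of P must sum to 1"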

###############################################################################
#
# Question 1
#
'''
eaa = np.linalg.matrix_power(qa, 100)

print("100th power of qa")
print(eaa)

ebb = np.linalg.matrix_power(qb, 100)

print("100th power of qb")
print(ebb)

import quantecon as qe

qa = np.array([[1. / 2, 1. / 2], [2. / 3, 1. / 3]])
qb = np.array([[2. / 3, 1. / 3], [1. / 4, 3. / 4]])

mcA = qe.MarkovChain(qa)
mcB = qe.MarkovChain(qb)

ppa = mcA.stationary_distributions
ppb = mcB.stationary_distributions

print("stationary distribution of P_a")

print(ppa)

mcB = qe.MarkovChain(qb)

ppb = mcB.stationary_distributions

print("stationary distribution of P_b")