Example #1
def SMTCovariance(sigma2, k):
    # This function computes the sparse matrix transformation estimate for the
    # covariance matrix sigma2 by using the number of rotations indicated in
    # vector k
    #  INPUTS
    # sigma2      :[matrix](n_ x n_) starting covariance matrix
    # k         :[vector](1 x k_) vector containing the number of sparse rotations to be used
    #  OPS
    # sigma2_SMT  :[matrix](n_ x n_) transformed covariance matrix

    n_ = sigma2.shape[0]

    #generate (j_ x n_) sample with target cov = sigma2  and mean = 0
    Model = 'Riccati'
    j_ = int(floor(n_ * 2))
    m = zeros((n_, 1))
    epsi, _ = NormalScenarios(m, sigma2, j_, Model)

    sigma2_SMT = zeros((n_, n_, len(k)))

    for i in range(len(k)):
        e, lam, ArraySMT = SMTCovarEst(epsi, k[i])
        CovSMT = e @ lam @ e.T
        s_vol = sqrt(diag(CovSMT)).reshape(-1, 1)  # column vector of standard deviations
        sigma2_SMT[:, :, i] = CovSMT / (s_vol @ s_vol.T)
    return sigma2_SMT, ArraySMT
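
A minimal usage sketch (not part of the original): SMTCovariance and its dependencies (NormalScenarios, SMTCovarEst) are assumed importable, and the inputs below are illustrative.

import numpy as np

sigma2 = np.array([[1.0, 0.3, 0.1],
                   [0.3, 2.0, 0.2],
                   [0.1, 0.2, 1.5]])   # illustrative starting covariance
k = np.array([5, 10, 20])              # numbers of sparse rotations to test

sigma2_SMT, ArraySMT = SMTCovariance(sigma2, k)
print(sigma2_SMT.shape)                # (3, 3, 3): one transformed matrix per entry of k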
Example #2
def GraphLasso(s2, lam):
    # This function estimates the covariance and inverse covariance matrix
    # using the graphical lasso algorithm.
    #  INPUTS
    # s2         :[matrix](n_ x n_) initial covariance matrix
    # lam        :[scalar] penalty for the glasso algorithm
    #  OPS
    # s2_est     :[matrix](n_ x n_) estimated covariance matrix
    # invs2_est  :[matrix](n_ x n_) inverse of the estimated matrix
    # iter       :[scalar] number of performed iterations
    # hasError   :[flag] flag indicating whether the algorithm terminated erroneously or not

    ## Code

    # generate a j_ x n_ sample with target cov = s2 and mean = 0, in order to
    # use the function created by H. Karshenas, which takes data rather than distributional parameters as input
    Model = 'Riccati'
    n_ = len(s2)
    j_ = int(floor(n_ * 2))
    m = zeros((n_, 1))
    epsi, _ = NormalScenarios(m, s2, j_, Model)

    s2_est, invs2_est, iter, hasError = GraphicalLasso(epsi.T, lam)
    return s2_est, invs2_est, iter, hasError
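
A hedged usage sketch: GraphLasso and its dependencies (NormalScenarios, GraphicalLasso) are assumed importable, and the covariance and penalty below are illustrative.

import numpy as np

s2 = np.array([[1.0, 0.4], [0.4, 1.2]])  # illustrative starting covariance
lam = 0.1                                # illustrative glasso penalty

s2_est, invs2_est, n_iter, hasError = GraphLasso(s2, lam)
print(np.round(s2_est, 3), hasError)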
Example #3
def PanicTDistribution(varrho2, r, c, nu, j_):
    # This function generates the joint scenarios and Flexible Probabilities of
    # a Panic t Distribution
    #  INPUTS
    #   varrho2      : [matrix] (n_ x n_) calm market correlation matrix
    #   r            : [scalar] homogeneous panic correlation
    #   c            : [scalar] prob threshold of a high-correlation crash event
    #   nu           : [scalar] degrees of freedom
    #   j_           : [scalar] number of scenarios
    #  OPS
    #   X            : [matrix] (n_ x j_) joint scenarios
    #   p_           : [vector] (1 x j_) Flexible Probabilities (posterior via Entropy Pooling)
    #
    #NOTE: Moment matching of t-simulations for nu > 2

    # For details on the exercise, see here .

    ## Code
    n_ = len(varrho2)
    corr_c = varrho2
    corr_p = (1 - r) * eye(n_) + r * ones((n_, n_))  # panic corr
    optionT = namedtuple('option', ['dim_red', 'stoc_rep'])
    optionT.dim_red = 0
    optionT.stoc_rep = 0
    if nu > 1:
        # Calm Component
        Xt_c = Tscenarios(nu, zeros((n_, 1)), corr_c, j_, optionT, 'Riccati')

        # Panic Component
        Xt_p = Tscenarios(nu, zeros((n_, 1)), corr_p, j_, optionT, 'Riccati')
    else:
        s2 = block_diag(corr_c, corr_p)
        Z, _ = NormalScenarios(zeros((2 * n_, 1)), s2, j_, 'Riccati')

        # Calm Component
        X_c = Z[:n_, :]
        Chi_2 = chi2.rvs(nu, size=j_)
        Xt_c = X_c / tile(sqrt(Chi_2 / nu), (n_, 1))

        # Panic Component
        X_p = Z[n_:, :]
        Chi_2 = chi2.rvs(nu, size=j_)
        Xt_p = X_p / tile(sqrt(Chi_2 / nu), (n_, 1))

    # Panic distribution
    B = (Xt_p < t.ppf(c, nu))  # triggers panic
    X = (1 - B) * Xt_c + B * Xt_p

    # Perturb probabilities via Fully Flexible Views
    p = ones((1, j_)) / j_  # flat flexible probabilities (prior)
    aeq = r_[ones((1, j_)), X]  # constrain the first moments
    beq = r_[array([[1]]), zeros((n_, 1))]

    p_ = MinRelEntFP(p, None, None, aeq, beq)[0]  # compute posterior probabilities
    return X, p_
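
A short usage sketch with illustrative inputs, assuming PanicTDistribution and its dependencies (Tscenarios, NormalScenarios, MinRelEntFP) are importable.

import numpy as np

varrho2 = np.array([[1.0, 0.2], [0.2, 1.0]])  # calm market correlation
r = 0.9     # homogeneous panic correlation
c = 0.05    # crash-probability threshold
nu = 5      # degrees of freedom
j_ = 1000   # number of scenarios

X, p_ = PanicTDistribution(varrho2, r, c, nu, j_)
print(X.shape, p_.sum())  # (2, 1000); the flexible probabilities sum to ~1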
Example #4
def UnifInsideEllScenarios(mu, sig2, j_, k_=0, method='Riccati', d=None):
    # This function generates j_ Monte Carlo scenarios of a random variable
    # uniformly distributed inside the ellipsoid defined by the location vector
    # mu and the dispersion matrix sig2
    #  INPUTS
    #   mu      : [vector]            (n_ x 1) location vector
    #   sig2    : [matrix]            (n_ x n_) dispersion matrix
    #   j_      : [scalar]            number of simulations
    #   k_      : [scalar]           (optional) number of factors for dimension
    #                                 reduction (we advise to take k_<<n_)
    # method    : [string]            Riccati (default), CPCA, PCA, LDL-Cholesky, Gram-Schmidt
    # d         : [matrix]           (k_ x n_) full rank constraints matrix for CPCA
    #  OPS
    #   X       : [matrix]           (n_ x j_) matrix of elliptical simulations
    #   R       : [vector]           (1 x j_)  vector of radial component scenarios
    #   Y       : [matrix]           (n_ x j_) matrix of uniform component scenarios

    # For details on the exercise, see here .
    ## Code

    n_ = len(mu)  # number of variables

    # Step 1. Radial Scenarios
    R = (rand(1, j_))**(1 / n_)

    # Step 2. Correlation
    rho2 = diag(diag(sig2)**(-1 / 2)) @ sig2 @ diag(diag(sig2)**(-1 / 2))

    # Step 3. Normal scenarios
    if k_ == 0:
        N = NormalScenarios(zeros((n_, 1)), rho2, j_, method, d)[0]
    else:
        N, beta = DimRedScenariosNormal(zeros((n_, 1)), rho2, k_, j_, method, d)

    # Step 4. Inverse
    if k_ != 0:
        delta2 = diag(eye((n_)) - beta @ beta.T)
        omega2 = diag(1 / delta2)
        rho2_inv = omega2 - omega2 @ beta @ solve(beta.T @ omega2 @ beta + eye(k_), beta.T @ omega2)
    else:
        rho2_inv = solve(rho2, eye(rho2.shape[0]))

    #Step 5. Cholesky
    rho_inv = cholesky(rho2_inv).T

    #Step 6. Normalizer
    M = sqrt(npsum((rho_inv @ N)**2, 0))

    #Step 7. Output
    Y = rho_inv @ N @ diagflat(1 / M)
    X = tile(mu, (1, j_)) + diagflat(sqrt(diag(sig2))) @ N @ diagflat(1 / M) @ diagflat(R)
    return X, R, Y
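
A usage sketch under illustrative inputs; UnifInsideEllScenarios and its dependency NormalScenarios are assumed importable.

import numpy as np

mu = np.array([[0.0], [1.0]])
sig2 = np.array([[1.0, 0.3], [0.3, 0.5]])
j_ = 5000

X, R, Y = UnifInsideEllScenarios(mu, sig2, j_)  # k_=0: no dimension reduction
print(X.shape, R.shape, Y.shape)                # (2, 5000), (1, 5000), (2, 5000)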
Example #5
def DimRedScenariosNormal(mu, sig2, k_, j_, method='Riccati', d=None):
    # This function generates Monte Carlo Scenarios from a multivariate normal
    # distribution with mean mu and covariance matrix sig2 through a dimension reduction
    # algorithm resorting to a linear factor model, where the matrix of loadings
    # is recovered through factor analysis of the correlation matrix
    # INPUTS
    # mu        :  [vector] (n_ x 1)  target mean
    # sig2      :  [matrix] (n_ x n_) target positive definite covariance matrix
    # k_        :  [scalar] number of factors to be considered for factor analysis (we recommend k_ << n_)
    # j_        :  [scalar] Number of scenarios. If not even, j_ <- j_+1
    #   method  :  [string] Riccati (default), CPCA, PCA, LDL-Cholesky, Gram-Schmidt
    #   d       :  [matrix] (k_ x n_) full rank constraints matrix for CPCA
    # OUTPUTS
    # X         :  [matrix] (n_ x j_) panel of MC scenarios drawn from normal
    #                       distribution with mean mu and covariance matrix sig2
    # beta      :  [matrix] (optional) (n_ x k_) loadings matrix ensuing from factor
    # analysis of the correlation matrix
    #
    # For details on the exercise, see here .

    ## Code

    if mod(j_, 2) != 0:
        j_ = j_ + 1

    n_ = sig2.shape[1]

    #Step 1. Correlation
    rho2 = diag(diag(sig2)**(-1 / 2)) @ sig2 @ diag(diag(sig2)**(-1 / 2))

    #Step 2. Factor Loadings
    _, beta, _, _, _ = FactorAnalysis(rho2, zeros((1, 1)), k_)

    #Step 3. Residual Standard Deviation
    delta = sqrt(diag(eye((n_)) - beta @ beta.T))

    #Step 4. Systematic scenarios
    #sigm = r_[-1,r_[eye(k_),zeros((k_,n_))],r_[zeros((n_,k_)),eye(n_)]]
    sigm = concatenate((concatenate((eye(k_), zeros((k_, n_))), axis=1),
                        concatenate((zeros((n_, k_)), eye(n_)), axis=1)))
    S, _ = NormalScenarios(zeros((k_ + n_, 1)), sigm, j_, method, d)
    Z_tilde = S[:k_, :]

    #Step 5. Idiosyncratic scenarios
    U_tilde = S[k_:k_ + n_, :]

    #Step 6. Output
    X = tile(mu, (1, j_)) + diag(sqrt(diag(sig2))) @ (beta @ Z_tilde + diag(delta) @ U_tilde)
    return X, beta
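
A hedged usage sketch with a random positive definite target covariance; DimRedScenariosNormal and its dependencies (FactorAnalysis, NormalScenarios) are assumed importable.

import numpy as np

n_, k_, j_ = 10, 2, 10000
mu = np.zeros((n_, 1))
a = np.random.randn(n_, n_)
sig2 = a @ a.T + n_ * np.eye(n_)  # illustrative positive definite covariance

X, beta = DimRedScenariosNormal(mu, sig2, k_, j_)
print(X.shape, beta.shape)        # (10, 10000), (10, 2)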
Example #6
def SimVAR1MVOU(x_0, u, theta, mu, sigma2, j_):
    # Simulate the MVOU process to future horizons by Monte Carlo method
    # model: dXt=-(theta*Xt-mu)dt+sigma*dWt
    #  INPUTS
    #   x_0        [matrix]: (n_ x j_) initial conditions at time t
    #   u          [vector]: (1 x u_)  projection horizons
    #   theta      [matrix]: (n_ x n_) transition matrix
    #   mu         [vector]: (n_ x 1) long-term means
    #   sigma2     [matrix]: (n_ x n_) covariances
    #   j_         [scalar]: simulations number
    #  OPS
    #   X_u        [tensor]: (n_ x j_ x u_) simulated process at times u_

    ## Code

    n_, _ = x_0.shape
    t_ = u.shape[1]

    if t_ > 1:
        tau = r_['-1', u[0, 0], u[0, 1:] - u[0, :-1]]
    else:
        tau = u.copy()

    X_u = zeros((n_, j_, t_))

    for t in range(t_):
        # project moments from t to t+tau
        mu_tau, sigma2_tau, _ = ProjMomentsVAR1MVOU(zeros((n_, 1)), tau[t], mu,
                                                    theta, sigma2)

        # simulate invariants
        Epsi, _ = NormalScenarios(zeros((n_, 1)), sigma2_tau, j_, 'Riccati')

        # simulate the MVOU process to the future horizon
        if t > 0:
            x_0 = X_u[:, :, t - 1]
        X_u[:, :, t] = expm(-theta * tau[t]) @ x_0 + tile(mu_tau, (1, j_)) + Epsi
    return X_u.squeeze()
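
A hedged usage sketch with illustrative parameters; SimVAR1MVOU and its dependencies (ProjMomentsVAR1MVOU, NormalScenarios, expm) are assumed importable.

import numpy as np

n_, j_ = 2, 1000
x_0 = np.zeros((n_, j_))                        # initial conditions
u = np.array([[21, 42, 63]]) / 252              # horizons (21, 42, 63 days, in years)
theta = np.array([[0.5, 0.0], [0.0, 0.8]])      # transition matrix
mu = np.array([[0.0], [0.01]])                  # long-term means
sigma2 = np.array([[0.04, 0.01], [0.01, 0.09]])  # covariance of the invariants

X_u = SimVAR1MVOU(x_0, u, theta, mu, sigma2, j_)
print(X_u.shape)                                # (2, 1000, 3)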
Example #7
import numpy as np
from numpy import array, r_
from scipy.stats import norm
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, scatter

from NormalScenarios import NormalScenarios

# input parameters
j_ = int(1e4)  # number of simulations
rho = -0.8  # normal correlation
mu_X = array([[-2], [5]])  # normal expectation
svec_X = array([[1], [3]])  # normal standard deviations
# -

# ## Generate moment matching normal simulations

# +
c2_X = array([[1, rho], [rho, 1]])  # correlation matrix
s2_X = np.diagflat(svec_X)@c2_X@np.diagflat(svec_X)  # covariance matrix

X,_ = NormalScenarios(mu_X, s2_X, j_, 'Chol')
X_1 = X[0]
X_2 = X[1]
# -

# ## Compute the grades scenarios

U_1 = norm.cdf(X_1, mu_X[0], svec_X[0])  # grade 1
U_2 = norm.cdf(X_2, mu_X[1], svec_X[1])  # grade 2
U = r_[U_1, U_2]  # joint realizations from the required copula

# ## Scatter-plot of the marginals

figure()
scatter(X_1, X_2, 0.5, [.5, .5, .5], '*')
plt.grid(True)
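
# optional complement (not in the original script): scatter the grade scenarios to
# visualize the copula implied by the normal simulations
figure()
scatter(U_1, U_2, 0.5, [.5, .5, .5], '*')
plt.grid(True)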
Example #8
        p_DGP[k] = zeros((i_,t_))
        for i in range(i_):
            a =r_[ones((1, t_)), epsi[[i],:]]
            b = r_[array([[1]]),(p@epsi[[i], :].T)*Z[i, k] / 100]
            p_DGP[k][i, :] = MinRelEntFP(p, None, None, a, b)[0]

    # marginals for CMA-combination
    for i in range(i_):
        y[i, :], idy = sort(epsi[i,:]), argsort(epsi[i,:])
        # f = p_DGP[k][0,idy]
        f = p_DGP[k][i,idy]
        ff[i, :] = cumsum(f)

    for j in range(j_):
        # Randomize time series I
        m, _ = NormalScenarios(zeros((i_, 1)), c2_DGP[k], t_, 'Riccati')
        U1 = norm.cdf(m)
        if npsum(U1==0) >= 1:
            print(k)
        I = CopMargComb(y, ff, U1)

        # Evaluate the correlation estimators
        C2_hat[:,:, j] = corrcoef(I)  # sample correlation
        C2_bar[:,:, j] = real(FactorAnalysis(C2_hat[:,:, j], d, rank)[0])  # shrinkage correlation

        # Compute the losses
        L_hat[j] = linalgnorm(C2_hat[:,:, j]-c2_DGP[k], ord='fro')**2  # sample loss
        L_bar[j] = linalgnorm(C2_bar[:,:, j]-c2_DGP[k], ord='fro')**2  # shrinkage loss

    # Compute errors
    er_hat[k] = mean(L_hat)  # sample error
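    # hedged completion (not in the original snippet): the shrinkage error would
    # presumably be accumulated analogously, assuming er_bar is allocated like er_hat
    er_bar[k] = mean(L_bar)  # shrinkage error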
Example #9
j_ = 10000
r = 3  # scale of the bands
n_points = 1000  # points of the bands
mu = array([[0.2], [0.5]])
sigma2 = array([[1, 0.5], [0.5, 0.8]])
# -

# ## Compute the standard deviations along the directions

# +
theta = linspace(0, 2 * pi, n_points).reshape(1, -1)
u = r_[cos(theta), sin(theta)]  # directions

sigma_u = sqrt(diag(u.T @ sigma2 @ u))  # projected standard deviations
# -

# ## Generate the normal sample

X, _ = NormalScenarios(mu, sigma2, j_, 'Chol')

# ## Display the band, the ellipsoid and overlay the scatterplot

figure(figsize=(10, 10))
p1 = PlotTwoDimBand(mu, sigma_u, u, r, 'b')
p2 = PlotTwoDimEllipsoid(mu, sigma2, r, [], [], 'r')
scatter(X[0], X[1], s=5, c=[.3, .3, .3], marker='*')
legend(['Exp-Std. dev. band', 'Exp-Cov ellipsoid'])
title('Bivariate normal')
plt.axis('equal')
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
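
# optional sanity check (not in the original script): the sample moments of the
# simulations should be close to the target mu and sigma2
import numpy as np
print(np.round(X.mean(axis=1, keepdims=True) - mu, 2))  # ~ zero vector
print(np.round(np.cov(X) - sigma2, 2))                  # ~ zero matrix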
Example #10
import numpy as np
from numpy import array, log

from PlotTwoDimEllipsoid import PlotTwoDimEllipsoid
from HistogramFP import HistogramFP
from NormalScenarios import NormalScenarios

# inputs
j_ = 10000  # simulations
mu = array([[0.17], [-2.5]])  # expectation
svec = array([[0.4], [1.2]])  # volatilities
rho = -0.8  # correlation
s2 = np.diagflat(svec) @ array([[1, rho], [rho, 1]]) @ np.diagflat(svec)  # covariance matrix
# -

# ## Generate bivariate normal simulations

Y, _ = NormalScenarios(mu, s2, j_, 'Riccati')
X = Y[[0]]
Z = Y[[1]]

# ## Compute the simulations of conditional expectation

phiZ = mu[0] + rho * svec[0] / svec[1] * (Z - mu[1])
mu_XphiZ = mu[0] * array([[1], [1]])  # joint expectation of X and E{X|Z}
pos = rho**2 * s2[0, 0]
s2_XphiZ = array([[s2[0, 0], pos], [pos, pos]])  # covariance matrix of X and E{X|Z}

# ## Plot the empirical pdf of X and overlay the pdf of the conditional expectation

# +
nbins = round(7 * log(j_))
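
# hedged completion of the plot (not part of the original script): plt.hist is used as a
# stand-in for HistogramFP, and the pdf of the conditional expectation E{X|Z} is read off
# mu_XphiZ and s2_XphiZ
from scipy.stats import norm
import matplotlib.pyplot as plt

plt.figure()
plt.hist(X.flatten(), bins=int(nbins), density=True, color=[.8, .8, .8],
         label='empirical pdf of X')
x_grid = np.linspace(phiZ.min(), phiZ.max(), 200)
plt.plot(x_grid, norm.pdf(x_grid, mu_XphiZ[1, 0], np.sqrt(s2_XphiZ[1, 1])), 'r',
         label='pdf of E{X|Z}')
plt.legend()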
Example #11
def Tscenarios(nu, mu, sig2, j_, optionT=None, method='Riccati', d=None):
    # This function generates Student t simulations whose moments match the
    # theoretical moments mu and nu/(nu-2)*sig2, either from the radial or the
    # stochastic representation, optionally with dimension reduction.
    #  INPUTS
    #   nu              : [scalar] degrees of freedom
    #   mu              : [vector] (n_ x 1) vector of means
    #   sig2            : [matrix] (n_ x n_) dispersion matrix
    #   j_              : [scalar] (even) number of simulations
    #   optionT         : [struct] with fields (the default value is 0 for both fields)
    #   optionT.dim_red : [scalar] number of factors to be used for normal
    #                     scenario generation with dimension reduction. If it is set to 0, normal
    #                     scenarios are generated without dimension reduction.
    #   optionT.stoc_rep : [scalar] Set it to 1 to generate t scenarios through
    #                     stochastic representation via normal and chi-square scenarios.
    #   method          : [string] Riccati (default), CPCA, PCA, LDL-Cholesky,
    #                              Gram-Schmidt, Chol
    #   d               : [matrix] (k_ x n_) full rank constraints matrix for CPCA
    #  OPS
    #   X               : [matrix] (n_ x j_) matrix of scenarios drawn from a
    #                     Student t distribution t(nu,mu,sig2).
    #
    #
    # NOTE: Always use a large number of simulations j_ >> n_ to ensure that
    #       NormalScenarios works properly. We also recommend a low number of
    #       factors k_ << n_

    # For details on the exercise, see here .
    ## Code

    if optionT is None:
        optionT = namedtuple('option', ['dim_red', 'stoc_rep'])
        optionT.dim_red = 0
        optionT.stoc_rep = 0
    n_ = len(mu)
    k_ = optionT.dim_red

    if optionT.stoc_rep == 0:
        # Step 1. Radial scenarios
        R = sqrt(n_ * f.ppf(rand(j_, 1), n_, nu))

    # Step 2. Correlation
    rho2 = np.diagflat(diag(sig2) ** (-1 / 2)) @ sig2 @ np.diagflat(diag(sig2) ** (-1 / 2))

    # Step 3. Normal scenarios
    if optionT.dim_red > 0:
        N, beta = DimRedScenariosNormal(zeros((n_, 1)), rho2, k_, j_, method, d)
    else:
        N, _ = NormalScenarios(zeros((n_, 1)), rho2, j_, method, d)

    # Step 4. Inverse
    if optionT.dim_red > 0:
        delta2 = diag(eye(n_) - beta @ beta.T)
        omega2 = np.diagflat(1 / delta2)
        rho2_inv = omega2 - omega2 @ beta @ solve(beta.T @ omega2 @ beta + eye(k_), beta.T @ omega2)
    else:
        rho2_inv = solve(rho2, eye(rho2.shape[0]))

    # Step 5. Cholesky
    rho_inv = cholesky(rho2_inv)

    # Step 6. Normalizer
    M = sqrt(npsum((rho_inv @ N) ** 2, axis=0))

    # Step 7. Output
    if optionT.stoc_rep == 0:
        # Elliptical representation
        X = tile(mu, (1, j_)) + np.diagflat(sqrt(diag(sig2))) @ N @ np.diagflat(1 / M) @ np.diagflat(R)
    else:
        # Stochastic representation
        v = chi2.ppf(rand(j_, 1), nu) / nu
        X = tile(mu, (1, j_)) + np.diagflat(sqrt(diag(sig2))) @ N @ np.diagflat(sqrt((1 / v)))
    return X
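
A hedged usage sketch with default options (no dimension reduction, radial representation); Tscenarios and its dependencies (NormalScenarios, DimRedScenariosNormal, scipy.stats) are assumed importable.

import numpy as np

nu, j_ = 7, 10000
mu = np.array([[0.0], [0.1]])
sig2 = np.array([[0.09, 0.02], [0.02, 0.04]])

X = Tscenarios(nu, mu, sig2, j_)
print(X.shape)  # (2, 10000)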
Example #12
sig_YF = sig2_YF[:n_, n_:n_ + k_]
sig2_F = sig2_YF[n_:n_ + k_, n_:n_ + k_]

# computation of beta
exp_Y = exp(mu_Y + diag(sig2_Y).reshape(-1,1) / 2)
exp_F = exp(mu_F + diag(sig2_F).reshape(-1,1) / 2)
beta = np.diagflat(exp_Y) @ (exp(sig_YF) - ones((n_, k_))) @ pinv((exp(sig2_F) - ones((k_, k_))) @ np.diagflat(exp_F))

# computation of alpha
alpha = exp_Y - ones((n_, 1)) - beta@(exp_F - ones((k_, 1)))
# -

# ## Generate simulations for variables Y,F and deduce simulations for X,Z

# +
YF = NormalScenarios(mu_YF, sig2_YF, j_, 'Riccati')[0]

XZ = exp(YF) - 1
X = XZ[:n_,:]
Z = XZ[n_:n_ + k_,:]
# -

# ## Set Flexible Probabilities

p = ones((j_, 1)) / j_

# ## Estimate regression LFM

[alpha_OLSFP, beta_OLSFP, s2_OLSFP, U] = OrdLeastSquareFPNReg(X, Z, p)

# ## Compute estimation errors
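
# hedged sketch (not part of the original snippet): one sensible error measure compares
# the simulation-based LFM parameters with their OLSFP estimates; the exact measure used
# by the original script is not shown
err_alpha = np.linalg.norm(alpha - alpha_OLSFP)           # error on the shift term
err_beta = np.linalg.norm(beta - beta_OLSFP, ord='fro')   # error on the loadings
print(err_alpha, err_beta)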
Example #13
def kMeansClustering(c2,k_,i_s,l_s,opt=0):
    # This function performs k-means clustering
    #  INPUTS:
    # c2          :[matrix](n_ x n_) starting correlation matrix
    # k_          :[scalar] max number of clusters
    # i_s         :[vector](n_ x 1) index vector such that i_s[l_s[i]:l_s[i+1]] points to the companies in the i-th sector
    # l_s         :[vector](n_sector+1 x 1) sector boundary indices
    # opt         :[scalar]if opt==1 the function performs k-means clustering for every k<=k_
    #  OPS:
    # c2_c        :[matrix](n_ x n_) correlation matrix sorted into clusters
    # i_c         :[vector](n_ x 1) index vector such that c2_c = c2[ix_(i_c, i_c)]
    # l_c         :[vector] cluster boundary indices (cumulative cluster sizes)
    # sect2clust  :[vector](k_ x 1) vector of indices that establish a correspondence between the k_ sectors and the k_ clusters

    ## Code

    # generate an (n_ x j_) sample with target corr = c2 and mean = 0
    Model = 'Riccati'
    n_ = len(c2)
    j_ = int(floor(n_*2))
    mu = zeros((n_,1))
    Epsi = NormalScenarios(mu,c2,j_,Model)[0]

    n_sector = len(l_s)-1

    if opt == 1:
        i_c = zeros((n_, k_), dtype=int)
        l_c = zeros((k_ + 1, k_))
        c2_c = zeros((n_, n_, k_))

        for k in range(k_):
            # find k+1 clusters
            if k + 1 == n_sector:
                # initialize the centroids with the sector means
                C_start = zeros((n_sector, j_))
                for k1 in range(n_sector):
                    C_start[k1] = mean(Epsi[i_s[l_s[k1]:l_s[k1 + 1]], :], axis=0)
                km = KMeans(n_clusters=k + 1, init=C_start, n_init=1)
            else:
                km = KMeans(n_clusters=k + 1)
            km.fit(Epsi)
            IDX, mu = km.labels_, km.cluster_centers_
            # sort by clusters
            i_c[:, k] = argsort(IDX)
            l_tmp = zeros(k + 1)
            for i in range(k + 1):
                l_tmp[i] = npsum(IDX == i)
            l_c[1:k + 2, k] = cumsum(l_tmp)
            c2_c[:, :, k] = c2[ix_(i_c[:, k], i_c[:, k])]
    else:
        # find k_ clusters
        if k_ == n_sector:
            # initialize the centroids with the sector means
            C_start = zeros((k_, j_))
            for k1 in range(k_):
                C_start[k1] = mean(Epsi[i_s[l_s[k1]:l_s[k1 + 1]], :], axis=0)
            km = KMeans(n_clusters=k_, init=C_start, n_init=1)
        else:
            km = KMeans(n_clusters=k_)
        km.fit(Epsi)
        IDX, mu = km.labels_, km.cluster_centers_
        # sort by clusters
        i_c = argsort(IDX)
        l_tmp = zeros(k_)
        for i in range(k_):
            l_tmp[i] = npsum(IDX == i)
        l_c = zeros(k_ + 1)
        l_c[1:] = cumsum(l_tmp)
        c2_c = c2[ix_(i_c, i_c)]

    #find agreement between sectors and clusters
    sect2clust = zeros((k_,1))
    p = zeros((k_,n_sector))
    for k in range(n_sector):
        id = i_s[l_s[k]:l_s[k + 1]]  # companies in sector k
        p[:, k] = histogram(IDX[id], bins=arange(k_ + 1))[0]  # counts of cluster labels within sector k
    for k in range(k_):
        sect2clust[k] = argmax(p[k])

    return c2_c, i_c, l_c, sect2clust
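
A hedged usage sketch with a toy sector structure (two sectors of two companies each); kMeansClustering and its dependencies (NormalScenarios, sklearn's KMeans) are assumed importable, and the inputs are illustrative.

import numpy as np

c2 = np.array([[1.0, 0.7, 0.1, 0.1],
               [0.7, 1.0, 0.1, 0.1],
               [0.1, 0.1, 1.0, 0.6],
               [0.1, 0.1, 0.6, 1.0]])  # block correlation: two sectors
i_s = np.arange(4)                     # companies already ordered by sector
l_s = np.array([0, 2, 4])              # sector boundaries
k_ = 2                                 # number of clusters

c2_c, i_c, l_c, sect2clust = kMeansClustering(c2, k_, i_s, l_s)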
Example #14
R2_XReg = zeros((stepsize, 1))
R2_XCS = zeros((stepsize, 1))

for n in range(stepsize):

    # ## Generate a sample from the joint distribution of the factors and residuals

    mu_ZU = zeros((k_ + nstep[n], 1))  # expectation
    sig2_ZU = zeros((k_, nstep[n]))  # systematic condition
    d = sig2_U * ones((nstep[n], 1))
    sigma2_U = np.diagflat(d * d)  # idiosyncratic condition
    sigma2_ZU = r_[r_['-1', sigma2_Z, sig2_ZU], r_['-1', sig2_ZU.T, sigma2_U]]  # covariance

    Z_U, _ = NormalScenarios(mu_ZU, sigma2_ZU, j_)  # joint sample
    # Z_U = Z_U.T  # ensure Z_U is (k_ + n_) x nsim

    # ## Generate target sample according to systematic-idiosyncratic LFM

    Z = Z_U[:k_, :]  # observable factors sample
    U = Z_U[k_:, :]  # observable residuals sample
    beta_XZ = randn(nstep[n], k_)  # observable loadings

    i_n = eye(nstep[n])
    X = r_['-1', beta_XZ, i_n] @ Z_U  # target sample
    sigma2_X = beta_XZ @ sigma2_Z @ beta_XZ.T + sigma2_U  # (low-rank plus diagonal) covariance

    sigma2_XZ = beta_XZ @ sigma2_Z  # covariance of target and factors

    invres2 = np.diagflat(1 / (d * d))  # inverse residuals covariance
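
    # hedged sketch (not in the original snippet): fill the r-squared entries for step n with a
    # simple trace-based r-squared; np is assumed imported as above, and the formulas below are
    # standard LFM identities rather than the routine the original script calls
    beta_Reg = sigma2_XZ @ np.linalg.inv(sigma2_Z)             # regression LFM loadings
    s2_U_Reg = sigma2_X - beta_Reg @ sigma2_XZ.T               # regression residual covariance
    R2_XReg[n] = 1 - np.trace(s2_U_Reg) / np.trace(sigma2_X)   # regression r-squared

    P = beta_XZ @ np.linalg.solve(beta_XZ.T @ invres2 @ beta_XZ, beta_XZ.T @ invres2)
    s2_U_CS = (np.eye(nstep[n]) - P) @ sigma2_X @ (np.eye(nstep[n]) - P).T
    R2_XCS[n] = 1 - np.trace(s2_U_CS) / np.trace(sigma2_X)     # cross-sectional r-squared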
Example #15
# parameters
j_ = 10000  # number of simulations
n_ = 2  # number of instruments in the portfolio
mu = array([[0.01], [0.08]])  # mean of the normal distribution of the instruments' compounded returns
sigma2 = array([[0.03, -0.057], [-0.057, 0.12]])  # covariance of the normal distribution of the instruments' compounded returns
w = array([[0.5], [0.5]])  # portfolio weights
# -

# ## Generate j_=10000 normal simulations of the instruments compounded returns
# ## by using function NormalScenarios

Instr_comp_ret = NormalScenarios(mu, sigma2, j_)[0]

# ## Compute the portfolio compounded returns

r = exp(Instr_comp_ret) - 1
r_w = npsum(tile(w, (1, j_)) * r, axis=0, keepdims=True)
ptf_comp_ret = log(1 + r_w)

# ## Compute the normalized empirical histogram stemming from the simulations using function HistogramFP

p = ones((1, j_)) / j_
option = namedtuple('option', 'n_bins')
option.n_bins = round(10 * log(j_))
nx, cx = HistogramFP(ptf_comp_ret, p, option)

# ## Plot the histogram of the compounded returns of the portfolio together with the normal fit.
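
# hedged sketch of the plot (not part of the original snippet): plt.hist is used here as a
# stand-in for the HistogramFP output computed above, and a normal pdf fitted to the
# flexible-probability moments of the portfolio compounded returns is overlaid
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt

mu_ptf = float(np.sum(p * ptf_comp_ret))                             # FP mean
sig_ptf = float(np.sqrt(np.sum(p * (ptf_comp_ret - mu_ptf) ** 2)))   # FP standard deviation

plt.figure()
plt.hist(ptf_comp_ret.flatten(), bins=int(option.n_bins), density=True,
         color=[.8, .8, .8], label='simulated pdf')
grid = np.linspace(ptf_comp_ret.min(), ptf_comp_ret.max(), 200)
plt.plot(grid, norm.pdf(grid, mu_ptf, sig_ptf), 'r', label='normal fit')
plt.legend()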
Example #16
m = array([[0.17], [0.06]])  # (normal) expectation
svec = array([[0.24], [0.14]])  # (normal) standard deviation
rho = 0.15  # (normal) correlation
# -

# ## Compute lognormal expectation and covariance

c2_ = array([[1, rho], [rho, 1]])  # (normal) correlation matrix
s2 = np.diagflat(svec) @ c2_ @ np.diagflat(svec)  # (normal) covariance matrix
mu = exp(m + 0.5 * diag(s2).reshape(-1, 1))  # expectation
sig2 = np.diagflat(mu) @ (exp(s2) - ones((2, 2))) @ np.diagflat(mu)  # covariance matrix

# ## Generate bivariate lognormal draws

X = exp(NormalScenarios(m, s2, j_, 'Riccati')[0])

# ## Compute the Riccati root of the correlation matrix and the vectors

# +
sigvec = sqrt(diag(sig2))  # standard deviation
c2 = np.diagflat(1 / sigvec) @ sig2 @ np.diagflat(1 / sigvec)  # correlation matrix

c = Riccati(eye(2), c2)
x = c.T @ np.diagflat(sigvec)
# -

# ## Compute Euclidean measures

inn_prods = x.T @ x
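
# hedged continuation (not part of the original snippet): turn the inner products into the
# usual Euclidean summaries of the two vectors
import numpy as np
lengths = np.sqrt(np.diag(inn_prods))                                        # vector lengths
cos_angle = inn_prods[0, 1] / (lengths[0] * lengths[1])                      # cosine of the angle
distance = np.sqrt(inn_prods[0, 0] + inn_prods[1, 1] - 2 * inn_prods[0, 1])  # Euclidean distance
print(lengths, cos_angle, distance)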
Example #17
# ## Run script S_AggregatesEstimation

from S_AggregatesEstimation import *

# ## Generate Monte Carlo projected path scenarios for each standardized cluster aggregating factor

# +
M_c1 = zeros((m_,j_))
M_c3 = zeros((m_,j_))
Zc1_tilde_proj = zeros((k_c1,m_,j_))
Zc3_tilde_proj = zeros((k_c3,m_,j_))

for m in range(m_):
    # Multivariate normal scenarios
    N_agg,_ = NormalScenarios(zeros((k_c1 + k_c3, 1)), rho2_aggr, j_)

    # Chi-squared scenarios
    M_c1[m, :] = chi2.ppf(rand(j_), k_c1)
    M_c3[m, :] = chi2.ppf(rand(j_), k_c3)

    # path scenarios
    Zc1_tilde_proj[:, m, :] = N_agg[:k_c1,:]@sqrt(diag(1 / M_c1[m, :]))

    Zc3_tilde_proj[:, m, :] = N_agg[k_c1 :k_c1 + k_c3,:]@sqrt(diag(1 / M_c3[m, :]))
# -

# ## Recover the projected paths scenarios for the standardized cluster 1

Xc1_tilde_proj =zeros((i_c1,m_,j_))
for m in range(m_):