Code example #1
File: LassoRegFP.py Project: s0ap/arpmRes

import numpy as np
from numpy import array, ones, sqrt, tile, zeros

from FPmeancov import FPmeancov
# solveLasso and OrdLeastSquareFPNReg are assumed importable from the same
# functions-legacy folder of the arpmRes project

def LassoRegFP(X, Z, p, lam, smartinverse=0):
    # Weighted lasso regression function.
    # Note: LassoRegFP includes the function solveLasso created by Gautam V.
    # Pendse (http://www.gautampendse.com). We made some changes to Pendse's
    # function in order to adapt it to SYMMYS' notation. The changes are made
    # in conformity with the Creative Commons Attribution 3.0 Unported License.
    #  INPUTS
    # X            :[matrix](n_ x t_end) time series of market observations
    # Z            :[matrix](k_ x t_end) time series of factors
    # p            :[vector](t_end x 1) flexible probabilities
    # lam          :[vector](l_ x 1) vector of penalties
    # smartinverse :[scalar] optional flag forwarded to the underlying solvers (default 0)
    #  OPS
    # alpha   :[matrix](n_ x l_) shifting term
    # beta    :[array](n_ x k_ x l_) loadings
    # s2_U    :[array](n_ x n_ x l_) covariance of residuals
    # U       :[array](n_ x t_end x l_) residuals

    ## Code

    n_, t_ = X.shape
    k_ = Z.shape[0]
    if isinstance(lam, (int, float)):
        l_ = 1
        lam = array([lam])
    else:
        l_ = len(lam)

    if smartinverse is None:
        smartinverse = 0

    # if p are not provided, observations are equally weighted
    if p is None:
        p = (1 / t_) * ones((1, t_))

    # solve optimization
    if l_ == 1 and lam[0] == 0:
        [alpha, beta, s2_U, U] = OrdLeastSquareFPNReg(X, Z, p, smartinverse)
    else:
        # preliminary de-meaning of x and z
        m_X, _ = FPmeancov(X, p)
        m_Z, _ = FPmeancov(Z, p)
        X_c = X - tile(m_X, (1, t_))
        Z_c = Z - tile(m_Z, (1, t_))
        # trick to adapt function solveLasso to the FP framework
        X_p = X_c @ sqrt(np.diagflat(p))
        Z_p = Z_c @ sqrt(np.diagflat(p))
        # initialize variables
        beta = zeros((n_, k_, l_))
        alpha = zeros((n_, l_))
        s2_U = zeros((n_, n_, l_))
        U = zeros((n_, t_, l_))
        # solve lasso
        for l in range(l_):
            for n in range(n_):
                output = solveLasso(X_p[[n], :], Z_p, lam[l], smartinverse)
                beta[n, :, l] = output.beta
            alpha[:, [l]] = m_X - beta[:, :, l] @ m_Z
            U[:, :, l] = X - tile(alpha[:, [l]], (1, t_)) - beta[:, :, l] @ Z
            _, s2_U[:, :, l] = FPmeancov(U[:, :, l], p)

    return alpha, beta[..., np.newaxis], s2_U[..., np.newaxis], U
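
A minimal self-contained sketch (numpy + scikit-learn, synthetic data) of the reweighting trick used above: after de-meaning with the flexible probabilities and scaling each observation by sqrt(p_t), the FP-weighted lasso reduces to an ordinary lasso. The mapping of the penalty lam to scikit-learn's alpha is an assumption derived from sklearn's documented objective.

import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
t_ = 500
z = rng.standard_normal((3, t_))  # factors (k_ x t_)
x = 0.5 + np.array([[1.0, 0.0, -0.5]]) @ z + 0.1 * rng.standard_normal((1, t_))
p = np.full((1, t_), 1 / t_)  # equal flexible probabilities

# de-mean with the flexible probabilities, then scale each observation by sqrt(p_t)
x_p = (x - x @ p.T) * np.sqrt(p)
z_p = (z - z @ p.T) * np.sqrt(p)

# sklearn's Lasso minimizes ||y - Xw||^2 / (2 n) + alpha ||w||_1, so a penalty
# lam in the FP-weighted objective corresponds to alpha = lam / (2 t_)
# (an assumption based on sklearn's documented objective)
beta = Lasso(alpha=2e-5, fit_intercept=False, max_iter=10000).fit(z_p.T, x_p.ravel()).coef_
print(beta)  # close to [1.0, 0.0, -0.5], with the zero loading shrunk to exactly 0
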
Code example #2

from numpy import dot, tile
from numpy.linalg import pinv

from FPmeancov import FPmeancov
# TransposeSquareRoot is assumed importable from the arpmRes functions-legacy folder

def TwistScenMomMatch(x, p, mu_, s2_, method='Riccati', d=None):
    # This function twists the scenarios x to match arbitrary target moments mu_ and s2_
    #  INPUTS
    #   x      : [matrix] (n_ x j_) scenarios
    #   p      : [vector] (1 x j_) flexible probabilities
    #   mu_    : [vector] (n_ x 1) target means
    #   s2_    : [matrix] (n_ x n_) target covariances
    #   method : [string] Riccati (default), CPCA, PCA, LDL-Cholesky, Gram-Schmidt
    #   d      : [matrix] (k_ x n_) full rank constraints matrix for CPCA
    #  OUTPUTS
    #   x_     : [matrix] (n_ x j_) twisted scenarios

    # For details on the exercise, see here .
    ## Code

    # Step 1. Original moments
    mu_x, s2_x = FPmeancov(x, p)

    # Step 2. Transpose-square-root of s2_x
    r_x = TransposeSquareRoot(s2_x, method, d)

    # Step 3. Transpose-square-root of s2_
    r_ = TransposeSquareRoot(s2_, method, d)

    # Step 4. Twist factors
    b = dot(r_, pinv(r_x))

    # Step 5. Shift factors
    a = mu_ - b.dot(mu_x)

    # Step 6. Twisted scenarios
    x_ = tile(a, (1, x.shape[1])) + b.dot(x)

    return x_
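
A self-contained numpy sketch of the same twist, with Cholesky factors standing in for TransposeSquareRoot; all data here are synthetic.

import numpy as np

rng = np.random.default_rng(1)
n_, j_ = 2, 10000
x = rng.standard_normal((n_, j_))  # scenarios
p = np.full((1, j_), 1 / j_)  # flexible probabilities

mu_x = x @ p.T  # (n_ x 1) HFP mean
s2_x = ((x - mu_x) * p) @ (x - mu_x).T  # (n_ x n_) HFP covariance

mu_ = np.array([[1.0], [-1.0]])  # target mean
s2_ = np.array([[2.0, 0.6], [0.6, 1.0]])  # target covariance

# Cholesky factors play the role of the transpose-square-roots
b = np.linalg.cholesky(s2_) @ np.linalg.pinv(np.linalg.cholesky(s2_x))
a = mu_ - b @ mu_x
x_ = a + b @ x  # twisted scenarios: moments now match the targets exactly

print(x_ @ p.T)  # ≈ [[1.], [-1.]]
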
Code example #3
File: PrincCompFP.py Project: s0ap/arpmRes

from numpy import argsort, diagflat, eye
from numpy.linalg import cholesky, eig, pinv, solve

from FPmeancov import FPmeancov

def PrincCompFP(X, p, sig2, k_):
    # This function computes the estimators of the shifting term alpha, optimal
    # loadings beta, factor extraction matrix gamma and covariance of residuals
    # s2 for a statistical LFM, by using the non-parametric approach
    #  INPUTS
    #   X         :[matrix] (n_ x t_end) time-series of target variables
    #   p         :[vector] (1 x t_end) flexible probabilities
    #   sig2      :[matrix] (n_ x n_) scale matrix defining the metric for the decomposition
    #   k_        :[scalar] number of factors
    #  OPS
    #   alpha_PC  :[vector] (n_ x 1) estimator of the shifting term
    #   beta_PC   :[matrix] (n_ x k_) estimator of loadings
    #   gamma_PC  :[matrix] (k_ x n_) estimator of factor-extraction matrix
    #   s2_PC     :[matrix] (n_ x n_) estimator of dispersion of residuals

    ## code
    n_,_ = X.shape
    # compute HFP-expectation and covariance of X
    m_X,s2_X = FPmeancov(X,p)
    # compute the Cholesky root of sig2
    sig = cholesky(sig2)
    # perform spectral decomposition
    s2_tmp = solve(sig,(s2_X.dot(pinv(sig))))
    lambda2, e = eig(s2_tmp)
    index = argsort(lambda2)[::-1]  # sort eigenvalues in decreasing order
    lambda2 = lambda2[index]
    e = e[:,index]
    # compute optimal loadings for PC LFM
    beta_PC = sig@e[:,:k_]
    # compute factor extraction matrix for PC LFM
    gamma_PC = e[:,:k_].T.dot(pinv(sig))
    # compute shifting term for PC LFM
    alpha_PC = (eye(n_)-beta_PC@gamma_PC)@m_X
    # compute the covariance of residuals
    s2_PC = sig@e[:,k_:n_]@diagflat(lambda2[k_:n_])@e[:,k_:n_][email protected]
    return alpha_PC, beta_PC, gamma_PC, s2_PC
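
A numpy-only sketch of the same construction in the special case sig2 = identity, where the generalized decomposition reduces to ordinary PCA; data are synthetic.

import numpy as np

rng = np.random.default_rng(2)
n_, t_, k_ = 5, 1000, 2
X = rng.standard_normal((n_, t_))
p = np.full((1, t_), 1 / t_)

m_X = X @ p.T
s2_X = ((X - m_X) * p) @ (X - m_X).T

lambda2, e = np.linalg.eigh(s2_X)  # eigh returns ascending eigenvalues
idx = np.argsort(lambda2)[::-1]
lambda2, e = lambda2[idx], e[:, idx]

beta_PC = e[:, :k_]  # loadings
gamma_PC = e[:, :k_].T  # factor-extraction matrix
alpha_PC = (np.eye(n_) - beta_PC @ gamma_PC) @ m_X  # shifting term
s2_PC = e[:, k_:] @ np.diag(lambda2[k_:]) @ e[:, k_:].T  # residual dispersion
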
Code example #4

from numpy import arange, r_, sqrt
from numpy import sum as npsum
from numpy.linalg import solve

from FPmeancov import FPmeancov
# pcacov, FitVAR1 and VAR1toMVOU are assumed importable from the arpmRes
# functions-legacy folder

def NonParamCointegrationFP(x, p, time_step, theta_threshold):
    ## This function estimates the cointegrated vectors of a multivariate process
    #  INPUTS
    #   x                :[matrix](d_ x t_end) historical series
    #   p                :[vector](1 x t_end) historical Flexible Probabilities
    #   time_step        :[scalar] estimation step
    #   theta_threshold  :[scalar] positive threshold for stationarity test
    #  OPS
    #   c                :[matrix](d_ x k_) k_ cointegrated eigenvectors
    #   y_hat            :[matrix](k_ x t_end) cointegrated time series
    #   lam_y            :[vector](k_ x 1) eigenvalues corresponding to the cointegrated eigenvectors
    #   mu_hat           :[vector](k_ x 1) estimated long-run expectation
    #   theta_hat        :[vector](k_ x 1) estimated transition parameters
    #   sd_hat           :[vector](k_ x 1) estimated long-run standard deviation

    # For details on the exercise, see here .
    ## Code

    # number of variables
    d_ = x.shape[0]

    # estimate HFP covariance matrix
    _, sigma2_hat = FPmeancov(x, p)

    # pca decomposition
    e_hat, lam_hat = pcacov(sigma2_hat)

    # define series
    y_t = e_hat.T @ x

    # fit the series
    k = 0
    for d in arange(d_ - 1, -1, -1):
        # fit the series with a univariate OU process
        alpha, b, sig2_U = FitVAR1(y_t[[d], :], p[[0], :-1] / npsum(p[0, :-1]))
        mu, theta, sigma2, _ = VAR1toMVOU(alpha, b, sig2_U, time_step)

        # cointegrated vectors
        if theta > theta_threshold:
            if k == 0:
                c = e_hat[:, [d]]
                y_hat = y_t[[d], :]
                lam_y = lam_hat[d]
                mu_hat = solve(theta, mu)
                theta_hat = theta
                sd_hat = sqrt(sigma2 / (2 * theta))
            else:
                c = r_['-1', c, e_hat[:, [d]]]
                y_hat = r_[y_hat, y_t[[d], :]]
                lam_y = r_[lam_y, lam_hat[d]]
                mu_hat = r_[mu_hat, solve(theta, mu)]
                theta_hat = r_[theta_hat, theta]
                sd_hat = r_[sd_hat, sqrt(sigma2 / (2 * theta))]
            k = k + 1

    return c, y_hat, lam_y, mu_hat, theta_hat, sd_hat
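
A toy check (synthetic data, numpy only) of the stationarity screen used above: an AR(1) series with slope b maps to an OU transition rate theta = -log(b) per unit time step, which is then compared with the threshold.

import numpy as np

rng = np.random.default_rng(3)
t_ = 1000
y = np.zeros(t_)
for t in range(1, t_):  # stationary AR(1) path with slope 0.9
    y[t] = 0.9 * y[t - 1] + rng.standard_normal()

b = np.polyfit(y[:-1], y[1:], 1)[0]  # AR(1) slope via least squares
theta = -np.log(b)  # OU transition rate for a unit time step
print(theta, theta > 0.01)  # roughly 0.105, so the series passes the screen
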
Code example #5
File: RidgeRegFP.py Project: s0ap/arpmRes

from numpy import eye, ones, r_, tile, zeros
from numpy.linalg import pinv

from FPmeancov import FPmeancov

def RidgeRegFP(X, Z, p, lam):
    # This function computes the solutions of the ridge regression of X on Z
    #  INPUTS
    # X        :[matrix](n_ x t_end) time series of target observations
    # Z        :[matrix](k_ x t_end) time series of factors observations
    # p        :[vector](1 x t_end) Flexible Probabilities
    # lam   :[vector](1 x l_) penalties for ridge regression
    #  OPS
    # alpha_l  :[matrix](n_ x l_) shifting parameter
    # beta_l   :[array](n_ x k_ x l_) array of optimal loadings
    # s2_l     :[array](n_ x n_ x l_) covariance matrix of residuals
    # U        :[array](n_ x t_end x l_) time series of residuals

    ## Code

    n_, t_ = X.shape
    k_ = Z.shape[0]
    l_ = len(lam)

    # if p are not provided, observations are equally weighted
    if p is None:
        p = (1 / t_) * ones((1, t_))

    # compute HFP mean and covariance of joint variable (XZ)
    m_joint, s2_joint = FPmeancov(r_[X, Z], p)
    m_X = m_joint[:n_]
    m_Z = m_joint[n_:n_ + k_]
    s2_XZ = s2_joint[:n_, n_:n_ + k_]
    s2_Z = s2_joint[n_:n_ + k_, n_:n_ + k_]

    alpha_l = zeros((n_, l_))
    beta_l = zeros((n_, k_, l_))
    s2_l = zeros((n_, n_, l_))
    U = zeros((n_, t_, l_))
    # compute solutions for every penalty
    for l in range(l_):
        beta_l[:, :, l] = s2_XZ.dot(pinv(s2_Z + lam[l] * eye(k_)))
        alpha_l[:, [l]] = m_X - beta_l[:, :, l] @ m_Z
        U[:, :, l] = X - tile(alpha_l[:, [l]], (1, t_)) - beta_l[:, :, l] @ Z
        [_, s2_l[:, :, l]] = FPmeancov(U[:, :, l], p)

    return alpha_l, beta_l, s2_l, U
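
A self-contained numpy sketch of the closed-form ridge loadings beta(lam) = s2_XZ (s2_Z + lam I)^{-1} on synthetic data, showing how the loadings shrink toward zero as lam grows.

import numpy as np

rng = np.random.default_rng(4)
n_, k_, t_ = 1, 3, 500
Z = rng.standard_normal((k_, t_))
X = np.array([[1.0, -0.5, 0.2]]) @ Z + 0.1 * rng.standard_normal((n_, t_))
p = np.full((1, t_), 1 / t_)

m = np.r_[X, Z] @ p.T
s2 = ((np.r_[X, Z] - m) * p) @ (np.r_[X, Z] - m).T
s2_XZ, s2_Z = s2[:n_, n_:], s2[n_:, n_:]

for lam in (0.0, 0.1, 1.0):  # loadings shrink toward zero as lam grows
    beta = s2_XZ @ np.linalg.pinv(s2_Z + lam * np.eye(k_))
    print(lam, beta.round(3))
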
Code example #6
File: CopulaOpinionPooling.py Project: s0ap/arpmRes

from numpy import r_, zeros
from numpy.linalg import solve

from FPmeancov import FPmeancov
# CopMargSep, CopMargComb and nullspace are assumed importable from the
# arpmRes functions-legacy folder

def CopulaOpinionPooling(X_pri, p, v, c, FZ_pos):
    # This function performs the Copula Opinion Pooling approach for distributional
    # views processing
    #  INPUTS
    #   X_pri       : [matrix] (n_ x j_) market prior scenarios
    #   p           : [vector] (1 x j_) Flexible Probabilities
    #   v           : [matrix] (k_ x n_) pick matrix
    #   c           : [vector] (k_ x 1) confidence levels
    #   FZ_pos      : [list] (k_) view cdf's
    #  OPS
    #   X_pos       : [matrix] (n_ x j_) market updated scenarios
    #   Z_pri       : [matrix] (k_ x j_) view variables prior scenarios
    #   U_pri       : [matrix] (k_ x j_) copula of prior view variables
    #   Z_pos       : [matrix] (k_ x j_) updated scenarios of view variables
    #   v_tilde     : [matrix] (n_ x n_) augmented pick matrix
    #   Z_tilde_pri : [matrix] (n_x j_) augmented prior scenarios view variables
    #   Z_tilde_pos : [matrix] (n_x j_) augmented posterior scenarios view variables

    # For details on the exercise, see here .
    ## Code

    _, j_ = X_pri.shape
    k_ = v.shape[0]

    # scenarios of the prior view variables
    Z_pri = v@X_pri

    # copula of the view variables
    Z_sorted, FZ_, U_pri = CopMargSep(Z_pri,p) # copula of Z_

    # matrix of the updated cdf's
    FZ_pos_matrix=zeros((k_,j_))
    for k in range(k_):
        FZ_pos_matrix[k]=c[k]*FZ_pos[k](Z_sorted[k])+(1-c[k])*FZ_[k]

    # scenarios of the posterior view variables
    Z_pos=CopMargComb(Z_sorted,FZ_pos_matrix,U_pri)

    # augmentation of the pick matrix
    _,s2 = FPmeancov(X_pri,p)
    a = v@s2
    v_ort = nullspace(a)[1].T
    v_tilde = r_[v,  v_ort]

    # augmentation of the view variables
    Z_tilde_pri = v_tilde@X_pri

    # posterior view variables
    Z_tilde_pos = r_[Z_pos,Z_tilde_pri[k_:,:]]

    # posterior market variables
    X_pos = solve(v_tilde,Z_tilde_pos)
    return X_pos, Z_pri, U_pri, Z_pos, v_tilde, Z_tilde_pri, Z_tilde_pos
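
The cdf-blending step of the loop above in isolation, as a hedged numpy/scipy sketch: at confidence c, the posterior grades are a mixture of a hypothetical N(1, 1) view cdf and toy prior grades.

import numpy as np
from scipy.stats import norm

c = 0.6
z_sorted = np.linspace(-3.0, 3.0, 7)  # sorted scenarios of one view variable
F_pri = np.linspace(0.1, 0.9, 7)  # prior cdf grades (toy values)
F_pos = c * norm.cdf(z_sorted, loc=1.0) + (1 - c) * F_pri  # blended cdf grades
print(F_pos.round(3))
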
Code example #7
File: Stats.py Project: s0ap/arpmRes

from numpy import argsort, cumsum, ones, sort, sqrt, where, zeros
from numpy import sum as npsum

from FPmeancov import FPmeancov

def Stats(epsi, FP=None):
    # Given a time series (epsi) and the associated probabilities FP,
    # this def computes the statistics: mean,
    # standard deviation,VaR,CVaR,skewness and kurtosis.
    # INPUT
    # epsi :[vector] (1 x t_end)
    # FP   :[matrix] (q_ x t_end) statistics are computed for each of the q_ sets of probabilities.
    # OUTPUT
    # m     :[vector] (q_ x 1) mean of epsi with FP (for each set of FP)
    # stdev :[vector] (q_ x 1) standard deviation of epsi with FP (for each set of FP)
    # VaR   :[vector] (q_ x 1) value at risk with FP
    # CVaR  :[vector] (q_ x 1) conditional value at risk with FP
    # sk    :[vector] (q_ x 1) skewness with FP
    # kurt  :[vector] (q_ x 1) kurtosis with FP
    ###########################################################################

    # size check
    if epsi.shape[0] > epsi.shape[1]:
        epsi = epsi.T  # epsi: row vector
    t_ = epsi.shape[1]

    # if FP argument is missing, set equally weighted FP
    if FP is None:
        FP = ones((1, t_)) / t_
    elif FP.shape[1] != epsi.shape[1]:
        FP = FP.T

    q_ = FP.shape[0]

    m = zeros((q_, 1))
    stdev = zeros((q_, 1))
    VaR = zeros((q_, 1))
    CVaR = zeros((q_, 1))
    sk = zeros((q_, 1))
    kurt = zeros((q_, 1))

    for q in range(q_):
        m[q] = (epsi * FP[[q], :]).sum()
        stdev[q] = sqrt(npsum(((epsi - m[q])**2) * FP[q, :]))
        SortedEps, idx = sort(epsi), argsort(epsi)
        SortedP = FP[[q], idx]
        VarPos = where(cumsum(SortedP) >= 0.01)[0][0]
        VaR[q] = -SortedEps[:, VarPos]
        CVaR[q] = -FPmeancov(
            SortedEps[[0], :VarPos + 1],
            SortedP[:, :VarPos + 1].T / npsum(SortedP[:, :VarPos + 1]))[0]
        sk[q] = npsum(FP[q, :] * ((epsi - m[q])**3)) / (stdev[q]**3)
        kurt[q] = npsum(FP[q, :] * ((epsi - m[q])**4)) / (stdev[q]**4)

    return m, stdev, VaR, CVaR, sk, kurt
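
A self-contained check (synthetic standard normal scenarios, equal probabilities) of the VaR/CVaR logic at the hard-coded 1% level used in the loop above.

import numpy as np

rng = np.random.default_rng(5)
epsi = rng.standard_normal(100000)
p = np.full(epsi.size, 1 / epsi.size)

order = np.argsort(epsi)
pos = np.searchsorted(np.cumsum(p[order]), 0.01)  # first scenario past the 1% level
VaR = -epsi[order][pos]
CVaR = -np.average(epsi[order][:pos + 1], weights=p[order][:pos + 1])
print(VaR, CVaR)  # ≈ 2.33 and ≈ 2.67 for standard normal scenarios
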
Code example #8

from numpy import arange, exp
from numpy import sum as npsum

from FPmeancov import FPmeancov

def EwmaFP(epsi, lam):
    # This function computes the exponentially weighted moving average (EWMA)
    # expectations and covariances for time series of invariants
    #  INPUTS
    #   epsi   : [matrix] (n_ x t_end) matrix of invariants observations
    #   lam    : [scalar]           decay rate (lam = log(2)/half-life)
    #  OPS
    #   mu     : [vector] (n_ x 1)  EWMA expectations
    #   sigma2 : [matrix] (n_ x n_) EWMA covariances

    # For details on the exercise, see here .
    ## Code
    _, t_ = epsi.shape
    w = exp(-lam * arange(t_ - 1, -1, -1))
    p = w / npsum(w)  # flexible probabilities, most recent observation weighted most
    mu, sigma2 = FPmeancov(epsi, p.reshape(1, -1))
    return mu, sigma2
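
A numpy-only equivalent of EwmaFP on synthetic data, making the half-life convention lam = log(2)/half-life explicit.

import numpy as np

rng = np.random.default_rng(6)
epsi = rng.standard_normal((2, 1000))
lam = np.log(2) / 120  # weights halve every 120 observations going back in time

w = np.exp(-lam * np.arange(epsi.shape[1] - 1, -1, -1))
p = w / w.sum()  # most recent observation carries the largest weight
mu = epsi @ p
sigma2 = ((epsi - mu[:, None]) * p) @ (epsi - mu[:, None]).T
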
Code example #9

from numpy import cumsum, sort, zeros
from numpy import sum as npsum
from numpy.random import rand
from scipy.stats import expon, poisson

from FPmeancov import FPmeancov
# HFPquantile is assumed importable from the arpmRes functions-legacy folder

def SimulateCompPoisson(lam, jumps_ts, p, ts, j_, method):
    # Simulate a Compound Poisson Process
    #  INPUTS
    # lam   :[scalar] Poisson process arrival rate
    # jumps_ts :[vector](1 x t_end) time series of realized jumps
    # p        :[vector](1 x t_end) vector of Flexible Probabilities associated with jumps_ts
    # ts       :[row vector] vector of future time steps with ts[0]=0
    # j_       :[scalar] number of simulations
    # method   :[string] ExpJumps or FromHistogram, chooses how to model jumps
    #  OPS
    # x        :[matrix](j_ x len(ts)) simulated paths

    ## Code
    tau = ts[0, -1]
    k_ = ts.shape[1]
    # simulate number of jumps
    n_jumps = poisson.rvs(lam * tau, size=(j_, 1))

    jumps = zeros((j_, k_))
    for j in range(j_):
        # simulate jump arrival time
        t = tau * rand(1, n_jumps[j, 0])
        t = sort(t)

        # simulate jumps size
        if method == 'FromHistogram':
            c = rand(1, n_jumps[j, 0])
            S = HFPquantile(jumps_ts, c, p)
        elif method == 'ExpJumps':
            #fit of the exponential parameter with FP
            mu, _ = FPmeancov(jumps_ts, p)
            S = expon.rvs(scale=mu, size=(1, n_jumps[j, 0]))

        # put things together
        CumS = cumsum(S)
        for k in range(1, k_):
            events = npsum(t <= ts[0, k])
            if events > 0:
                jumps[j, k] = CumS[events - 1]

    x = jumps
    return x
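
A self-contained sketch of the same simulation scheme with unit-rate exponential jumps drawn directly from numpy, skipping the arpmRes helpers.

import numpy as np

rng = np.random.default_rng(7)
lam, j_ = 2.0, 5  # arrival rate and number of paths
ts = np.linspace(0.0, 1.0, 11)  # future time steps with ts[0] = 0
tau = ts[-1]

x = np.zeros((j_, ts.size))
for j in range(j_):
    n = rng.poisson(lam * tau)  # number of jumps on [0, tau]
    t = np.sort(rng.uniform(0.0, tau, n))  # jump arrival times
    cum_s = np.cumsum(rng.exponential(1.0, n))  # cumulated jump sizes
    for k in range(1, ts.size):
        events = np.sum(t <= ts[k])
        if events > 0:
            x[j, k] = cum_s[events - 1]
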
Code example #10
File: FitSkewtMLFP.py Project: s0ap/arpmRes

import numpy as np
from numpy import sqrt
from scipy.optimize import minimize

from FPmeancov import FPmeancov
# RelativeEntropySkewt is assumed importable from the arpmRes functions-legacy folder

def FitSkewtMLFP(x, p):
    # This function estimates parameters [mu, sigma, alpha, nu] by minimizing
    # the relative entropy between the HFP-pdf specified by the inputs and the
    # univariate pdf of a Skew t-distribution.
    #  INPUTS
    #   x      :[vector](1 x t_end) time series of observations
    #   p      :[vector](1 x t_end) Flexible Probabilities profile
    #  OPS
    #   mu     :[scalar] location parameter
    #   sigma  :[scalar] dispersion parameter
    #   alpha  :[scalar] skew parameter
    #   nu     :[scalar] degrees of freedom

    # initial guess
    m, s2 = FPmeancov(x, p)
    parmhat = [m[0, 0], sqrt(s2[0, 0]), 0, 30]

    lb = [-np.inf, 0, -np.inf, 0]
    ub = [None, None, None, None]
    bound = list(zip(lb, ub))
    options = {'disp': True}
    # Minimize relative entropy
    res = minimize(RelativeEntropySkewt,
                   parmhat,
                   args=(x, p),
                   bounds=bound,
                   method='SLSQP',
                   tol=10**-8,
                   options=options)
    parmhat = res.x

    mu = parmhat[0]
    sigma = parmhat[1]
    alpha = parmhat[2]
    nu = parmhat[3]
    return mu, sigma, alpha, nu
Code example #11
File: S_SizeSignals.py Project: s0ap/arpmRes
t_smoo = 180
s_siz_smoo = zeros((n_, t_ - t_smoo + 1))

for t in range(t_smoo, s_siz.shape[1] + 1):
    s_siz_smoo[:, [t - t_smoo]] = EwmaFP(s_siz[:, t - t_smoo:t], tauHL_smoo)[0]
# -

# ## Use the smoothed signals to compute the scored signal

t_scor = 252
s_siz_scor = zeros((n_, s_siz_smoo.shape[1] - t_scor + 1))
tauHL_scor = log(2) / 120
w_scor = exp(-tauHL_scor * arange(t_scor - 1, -1, -1))
p_scor = (w_scor / npsum(w_scor)).reshape(1, -1)
for t in arange(t_scor, s_siz_smoo.shape[1] + 1):
    mu_siz, cov_siz = FPmeancov(s_siz_smoo[:, t - t_scor:t], p_scor)
    s_siz_scor[:, t - t_scor] = (s_siz_smoo[:, t - 1] - mu_siz.flatten()) / sqrt(diag(cov_siz))

# ## Use the scored signals to compute the ranked signals

s_siz_rk = zeros((n_, s_siz_scor.shape[1]))
for t in range(s_siz_scor.shape[1]):
    rk = argsort(s_siz_scor[:, t])
    rk_signal = argsort(rk) + 1
    s_siz_rk[:, t] = (rk_signal - 0.5 * n_) * (2 / n_)

# ## Compare the plots of one signal, one smoothed signal and one scored signal

dates = dates[t_start - 1:]
Code example #12
sigma_delta = db_ImpliedVol_SPX.Sigma

implied_vol = sigma_delta[0, delta == 0.5,
                          1:]  # at the money option expiring in tau[0] years
prices = db_ImpliedVol_SPX.Underlying
logrets = diff(log(prices))
dates = db_ImpliedVol_SPX.Dates[1:]
dates = array([date_mtop(i) for i in dates])

t_ = len(dates)

lam = log(2) / 90  # exponential decay probabilities, half-life 3 months
FP = exp(-lam * arange(t_, 0, -1))
FP = (FP / npsum(FP)).reshape(1, -1)

m, s2 = FPmeancov(r_[logrets[np.newaxis, ...], implied_vol], FP)

# colors
c0 = [.9, .4, 0]
c1 = [.4, .4, 1]
c2 = [0.3, 0.3, 0.3]
myFmt = mdates.DateFormatter('%d-%b-%y')
# -

# ## Generate the figure

# +
date_tick = range(0, t_, 150)  # tick for the time axes
xticklabels = dates[date_tick]  # labels for dates

f = figure()
Code example #13
p, _ = BlowSpinFP(epsi, b, s)
q_ = b + s

# ## Compute HFP-mean/cov and HBFP-mean/cov from original data

# +
print('Compute HFP-mean/cov and HBFP-mean/cov from original data')

mu_HFP = zeros((n_, 2))
mu_HBFP = zeros((n_, 2))
sigma2_HFP = zeros((n_, n_, 2))
sigma2_HBFP = zeros((n_, n_, 2))
p_HBFP = zeros(2)
v_HBFP = zeros(2)

mu_HFP[:, [0]], sigma2_HFP[:, :, 0] = FPmeancov(
    epsi, p)  # HFP mean and covariance from original data
mu_HBFP[:, 0], sigma2_HBFP[:, :, 0], p_HBFP[0], v_HBFP[0], _ = HighBreakdownFP(
    epsi, p.copy(), 1)  # HBFP mean and covariance from original data
# -

# ## Detect points outside the HBFP ellipsoid

# +
lev = 1.2
Diag_lambda2, e = eig(sigma2_HBFP[:, :, 0])
y = zeros((n_, t_))
ynorm = zeros((1, t_))

for t in range(t_):
    y[:, t] = solve(e @ sqrt(diagflat(Diag_lambda2)),
                    epsi[:, t] - mu_HBFP[:, 0])
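# -

# ## Flag the scenarios outside the ellipsoid (a sketch completing the loop above)

# scenarios whose rescaled coordinates have norm greater than lev lie outside
# the level-lev HBFP ellipsoid; npsum is numpy.sum under the legacy import
# convention used in these scripts
ynorm = sqrt(npsum(y ** 2, axis=0))
outside = ynorm > lev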
Code example #14

import numpy as np
from numpy import arange, diagflat, isnan, ones, r_, tile, zeros
from numpy import sum as npsum
from numpy.linalg import inv, norm

from FPmeancov import FPmeancov

def EMalgorithmFP(epsi, FP, nu, tol):
    #Expectation-Maximization with Flexible Probabilities for Missing Values
    #under Student t assumption (nu degrees of freedom)
    # INPUT
    # epsi         : [matrix] (i_ x t_end) observations, with NaNs for missing values
    # FP           : [vector] (1 x t_end) flexible probabilities
    # nu           : [scalar] multivariate Student's t degrees of freedom
    # tol          : [scalar] or [vector] (2 x 1) tolerance, needed to check convergence of mu and sigma2 estimates
    # OPS
    # mu           : [vector] (i_ x 1)  EMFP estimate of the location parameter
    # sigma2       : [matrix] (i_ x i_) EMFP estimate of the scatter matrix

    # For details on the exercise, see here .

    #tolerance: needed to check convergence
    if isinstance(tol, float) or len(tol) == 1:
        tol = [tol, tol]

    i_, t_ = epsi.shape

    #step0: initialize
    I = isnan(epsi)

    Data = epsi[:, npsum(I, axis=0) == 0]

    FPa = FP[[0], npsum(I, axis=0) == 0].reshape(1, -1)
    FPa = FPa / npsum(FPa)

    # HFP mu and sigma2 on available data
    m, s2 = FPmeancov(Data, FPa)
    # m = m[...,np.newaxis]
    s2 = s2[..., np.newaxis]

    w = ones((1, t_))

    Error = ones(len(tol)) * 10**6
    j = 0
    # start main loop
    gamma = {}
    while any(Error > tol):
        j = j + 1
        eps = zeros((epsi.shape[0], t_))
        for t in range(t_):
            gamma[t] = zeros((i_, i_))

            na = np.where(isnan(epsi[:, t]))[0]  # non-available (missing) entries
            a = np.delete(arange(i_), na)  # available entries

            A = i_ - len(na)  # number of available entries

            eps[a, t] = epsi[a, t]
            eps[na, t] = epsi[na, t]

            #step1:

            #update weights
            invs2 = inv(s2[np.ix_(a, a, [j - 1])].squeeze())
            w[0, t] = (nu + A) / (nu + (eps[a, [t]] - m[a, [j - 1]]).T @ invs2
                                  @ (eps[a, [t]] - m[a, [j - 1]]))

            if na.size > 0:
                #fill entries
                eps[na, t] = (m[na, [j - 1]] +
                              s2[np.ix_(na, a, [j - 1])].squeeze() @ invs2
                              @ (eps[a, [t]] - m[a, [j - 1]])).flatten()

                #fill buffer
                gamma[t][np.ix_(
                    na,
                    na)] = s2[np.ix_(na, na, [j - 1])].squeeze() - s2[np.ix_(
                        na, a, [j - 1])].squeeze() @ invs2 @ s2[np.ix_(
                            a, na, [j - 1])].squeeze()

        #step[1:] update output
        new_m = (eps @ (FP * w).T) / npsum(FP * w)
        m = r_['-1', m, new_m]
        gamma_p = zeros(gamma[0].shape + (t_, ))
        for t in range(t_):
            gamma_p[:, :, t] = gamma[t] * FP[0, t]
        new_s2 = (eps - tile(m[:, [j]], (1, t_))) @ (diagflat(
            FP * w)) @ (eps - tile(m[:, [j]], (1, t_))).T + npsum(gamma_p, 2)
        s2 = r_['-1', s2, new_s2[..., np.newaxis]]

        # step3: check convergence
        Error[0] = norm(m[:, j] - m[:, j - 1]) / norm(m[:, j - 1])
        Error[1] = norm(s2[:, :, j] - s2[:, :, j - 1], ord='fro') / norm(
            s2[:, :, j - 1], ord='fro')

        mu = m[:, -1]
        sigma2 = s2[:, :, -1]
    return mu, sigma2
Code example #15
# +
# varnames_to_save = [x_1,j_,x_1hor,x_2,x_2hor,x_3,x_3hor,n_,n_grid,tau,eta,sigma_m ,maturity,m_grid,p,ens,sigma_m,dates]
# vars_to_save = {varname: var for varname, var in locals().items() if isinstance(var,(np.ndarray,np.float,np.int)) and varname in varnames_to_save}
# savemat(os.path.join(TEMPORARY_DB, 'db_ProjOptionsHFP'),vars_to_save)
# -

# ## Select the horizon for the plot, select the log-underlying and the log ATM 1yr implied vol, and compute the HFP mean and covariance

# +
x_1fixhor = x_1hor[:, [-1]]
mateq1 = where(maturity == 1)[0] + 1
mgrideq0 = where(m_grid == 0)[0] + 1
x_3fixhor = x_3hor[mateq1 * mgrideq0 - 1, :, [-1]].T

[mu_HFP, sigma2_HFP] = FPmeancov(r_['-1', x_1fixhor, x_3fixhor].T, p)

col = [0.94, 0.3, 0]
colhist = [.9, .9, .9]
# axis settings
x1_l = HFPquantile(x_1fixhor.T, array([[10**-6]]), p).squeeze()
x1_u = HFPquantile(x_1fixhor.T, array([[1 - 10**-6]]), p).squeeze()
x2_l = HFPquantile(x_3fixhor.T, array([[10**-6]]), p).squeeze()
x2_u = HFPquantile(x_3fixhor.T, array([[1 - 10**-6]]), p).squeeze()

f = figure()
grey_range = arange(0, 0.81, 0.01)
CM, C = ColorCodedFP(p, None, None, grey_range, 0, 1, [0.75, 0.25])
# colormap(CM)
option = namedtuple('option', 'n_bins')
option.n_bins = round(6 * log(ens))
Code example #16
dt = 0.5
horiz_u = arange(0, t_end + dt, dt)
prices = Data.Prices
index_stock = 279  # selected stock
x = log(prices[
    [index_stock - 1],
    -1000:])  # risk drivers (log-values) take the last 1000 observations
x_ = x.shape[1]
epsi = diff(x, 1)  # invariants

lam = log(2) / 250  # half-life 1y
exp_decay = exp(-lam * (x_ - 1 - arange(0, x_ - 1, 1))).reshape(1, -1)
flex_probs_estimation = sort(
    exp_decay /
    npsum(exp_decay))  # sorted and normalized flexible probabilities
mu, var = FPmeancov(epsi, flex_probs_estimation)
sig = sqrt(var)
# -

# ## Simulate the risk driver as an arithmetic Brownian motion with drift using function SimulateBrownMot.

j_ = 9000
X = SimulateBrownMot(x[0, -1], horiz_u, mu, sig, j_)

# ## Compute the equity P&L, along with the mean and the standard deviation.

PL = exp(x[0, -1]) * (exp(X - x[0, -1]) - 1)
Mu_PL = exp(x[0, -1]) * (exp((mu + 0.5 * var) * horiz_u) - 1)
Sigma_PL = exp(x[0, -1]) * exp(
    (mu + 0.5 * var) * horiz_u) * sqrt(exp(horiz_u * var) - 1)
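
# ## Check the analytical mean against the simulated scenarios

# a quick sanity check (reusing PL and Mu_PL from above): the scenario average
# of the P&L at the final horizon should approach the lognormal moment formula
print(PL[:, -1].mean(), Mu_PL[0, -1])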
Code example #17
dy = diff(y)
dx_HST = dx_HST[[0], s_:-s_]
x_HST = x_HST[[0], s_:-s_]

# - Fit the CIR process to y by FitCIR_FP
t_obs = len(dy)  # time series len
p_HST = ones((1, t_obs)) / t_obs  # flexible probabilities
delta_t = 1  # fix the unit time-step to 1 day
par_CIR = FitCIR_FP(y[1:], delta_t, None, p_HST)
kappa = par_CIR[0]
y_bar = par_CIR[1]
eta = par_CIR[2]

# - Estimate the drift parameter and the correlation coefficient between the Brownian motions by FPmeancov
mu_HST, sigma2_x_HST = FPmeancov(
    r_[dx_HST[[0], 1:],
       dy.reshape(1, -1)], p_HST)  # daily mean vector and covariance matrix
mu_x_HST = mu_HST[0]  # daily mean
rho_HST = sigma2_x_HST[0, 1] / sqrt(
    sigma2_x_HST[0, 0] * sigma2_x_HST[1, 1])  # correlation parameter

# - Extract the invariants
epsi_x_HST = (dx_HST[[0], 1:] - mu_x_HST * delta_t) / sqrt(y[1:])
epsi_y = (dy + kappa * (y[1:] - y_bar) * delta_t) / (eta * sqrt(y[1:]))
epsi_HST = r_[epsi_x_HST, epsi_y.reshape(1, -1)]
# -

# ## Extract the invariants for the MVOU process as follows:
# ## - Compute the 2-year and 7-year yield to maturity by RollPrices2YieldToMat and obtain the corresponding shadow rates by InverseCallTransformation
# ## Select the two-year and seven-year key rates and estimate the MVOU process
# ## parameters using functions FitVAR1 and VAR1toMVOU.
Code example #18
# ## Compute HFP-ellipsoid and HFP-histogram

# +
q_ = b + s
mu_HFP = zeros(
    (2,
     q_))  # array containing the mean vector for each one of the q_ profiles
sigma2_HFP = zeros(
    (2, 2, q_)
)  # array containing the covariance matrix for each one of the q_ profiles
z_2 = zeros((q_, t_))
mu_z2 = zeros((1, q_))

for q in range(q_):
    mu_HFP[:, [q]], sigma2_HFP[:, :, q] = FPmeancov(epsi, p[[q], :])
    for t in range(t_):
        z_2[q, t] = (epsi[:, t] - mu_HFP[:, q]).T @ solve(
            n_ * sigma2_HFP[:, :, q], epsi[:, t] - mu_HFP[:, q])
    mu_z2[0, q] = p[q, :] @ z_2[q, :].T
# -

# ## Generate some figures showing how the HFP-ellipsoid evolves as the FP profile changes

# +
grey_range = arange(0, 0.81, 0.01)
q_range = array([1, 99])
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%Y')

for q in range(q_):
Code example #19
x = Rates[0]  # select time series corresponding to 1y par rates
dates = Dates
dx = diff(x)

# ## Fit the parameters of fractional Brownian motion

# +
lags = 50
d0 = 0

d = FitFractionalIntegration(dx, lags, d0)[0]
h = d + 0.5  # Hurst coefficient

t_ = len(dx)
[mu, sigma2] = FPmeancov(dx.reshape(1, -1), ones((1, t_)) / t_)
# -

# ## Initialize projection variables

tau = 252  # investment horizon of 1 year (expressed in days)
dt = 1  # infinitesimal step for simulations
t_j = arange(0, tau + dt, dt)
j_ = 15  # number of simulated paths

# ## Simulate paths
h = 0.091924700639547 + 0.5  # hard-coded value used for the simulation (overrides the h fitted above)

# +
dW = ffgn(h, j_, len(t_j) - 1)
W = r_['-1', zeros((j_, 1)), cumsum(dW, 1)]
Code example #20
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
ens1 = EffectiveScenarios(p1, typ)
# generalized method of moments
Parameters = IterGenMetMomFP(dn, p1, 'Poisson')
lam = Parameters.lam

# ## Fit jumps to an exponential distribution

# exponential decay FP
lam2 = log(2) / round(100 * lam)
p2 = exp(-lam2 * arange(dq.shape[1], 0, -1)).reshape(1, -1)
p2 = p2 / npsum(p2)  # FP-profile: exponential decay
ens2 = EffectiveScenarios(p2, typ)
# compute FP-mean and variance of an exponential distribution
mu_dq, _ = FPmeancov(dq, p2)
sigma2_dq = mu_dq ** 2

# ## Compute expectation and variance of the compound Poisson process

# +
mu = lam*mu_dq
sigma2 = lam*sigma2_dq
sigma = sqrt(sigma2)

# project to future times
mu_tau = mu*t_j
sigma_tau = sigma*sqrt(t_j)
# -

# ## Simulate the compound Poisson process
Code example #21
    mu[i], sig2[i],_ = MaxLikelihoodFPLocDispT(epsi[[i],:], p_t, nu_marg, 10 ** -6, 1)
    epsi_t[i, :] = (epsi[i, :] - mu[i]) / sqrt(sig2[i])
    u[i, :] = tstu.cdf(epsi_t[i, :], nu_marg)
# -

# ## Estimate the correlation of the t-copula
# ## map observations into copula realizations

# +
nu = 5
c = zeros((u.shape))
for i in range(bonds_i_):
    c[i,:] = tstu.ppf(u[i, :], nu)

    # estimate the correlation matrix
[_, s2_hat] = FPmeancov(c, ones((1, t_)) / t_)
c2 = np.diagflat(1 / sqrt(diag(s2_hat)))@[email protected](1 / sqrt(diag(s2_hat)))
# -
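
# ## The covariance-to-correlation rescaling above, as a reusable helper

# a small sketch (not part of arpmRes): normalize a covariance matrix into a
# correlation matrix, exactly as done for s2_hat above
def cov2corr(s2):
    d = 1 / sqrt(diag(s2))
    return np.diagflat(d) @ s2 @ np.diagflat(d)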

# ## Factor analysis

# +
k_LRD = 1  # one factor
c2_LRD, beta,*_ = FactorAnalysis(c2, array([[0]]), k_LRD)
c2_LRD, beta = np.real(c2_LRD), np.real(beta)
c2_credit = np.diagflat(diag(c2_LRD) ** (-1 / 2))@[email protected](diag(c2_LRD) ** (-1 / 2))
sig_credit = sqrt(diag(eye(c2_credit.shape[0]) - [email protected]))

Transitions.beta = beta
Transitions.c2_diag = diag(diag(eye((n_issuers)) - [email protected]))
Transitions.n_issuers = n_issuers
Code example #22
# -

# ## Compute residuals

# +
[n_, k_, t_] = beta.shape

U = zeros((n_, t_))
for t in range(t_):
    U[:,t] = X_shift[:,t] - beta[:,:, t]@Z[:, t]
# -

# ## Residuals analysis

# ## compute statistics of the joint distribution of residuals and factors
m_UZ, s2_UZ = FPmeancov(r_[U,Z], ones((1, t_)) / t_)

# ## compute correlation matrix

# +
sigma = sqrt(diag(s2_UZ))
c2_UZ = np.diagflat(1 / sigma)@[email protected](1 / sigma)

c_UZ = c2_UZ[:n_, n_ :n_ + k_]
c2_U = tril(c2_UZ[:n_, :n_], -1)
# -

# ## Plot (untruncated) correlations among residuals

# +
# reshape the correlations in a column vector
Code example #23
rho = 0.95  # correlation
s2 = np.diagflat(svec) @ array([[1, rho], [rho, 1]]) @ np.diagflat(
    svec)  # covariance matrix
# -

# ## Generate bivariate lognormal draws

Y = exp(NormalScenarios(mu, s2, j_, 'Riccati')[0])
X = Y[[0]]
Z = Y[[1]]

# ## Compute the sample of innovation

Psi = NormInnov(log(r_[X, Z]), mu, svec, rho)
p = ones((1, j_)) / j_
mu_ZPsi, s2_ZPsi = FPmeancov(r_[Z, Psi],
                             p)  # expectation and covariance of Z and Psi

# ## Visualize empirical pdf of innovation

# +
nbins = round(7 * log(j_))
figure()

p = ones((1, Psi.shape[1])) / Psi.shape[1]
option = namedtuple('option', 'n_bins')

option.n_bins = nbins
[n, psi] = HistogramFP(Psi, p, option)
bar(psi[:-1],
    n[0],
    width=psi[1] - psi[0],
Code example #24
SPX_thor = db['SPX_thor']
htilde = db['htilde']
p = db['p']
n_ = db['n_']
j_ = db['j_']
Pi = db['Pi']
# -

# ## Compute the scenario-probability mean and covariance of the ex-ante return by using function FPmeancov.
# ## Then, compute the expected value, the variance and the standard deviation

# +
Y_htilde = Y_htilde.reshape(1, -1)
p = p.reshape(1, -1)

[mu_Y, s2_Y] = FPmeancov(Y_htilde, p)

Satis = namedtuple(
    'Satis',
    'E_Y variance stdev mv_2 mv mv_Hess mv_new msd msvcq cq_grad ce_erf '
    'Bulhmann_expectation Esscher_expectation')
Satisf = namedtuple('Satisf', 'mv_grad PH VaR')
Risk = namedtuple(
    'risk',
    'variance stdev mv_2 mv mv_grad mv_Hess mv_new msd msv PH VaR cq cq_grad')

# expected value
Satis.E_Y = mu_Y

# variance
Risk.variance = s2_Y
Code example #25
X_u = log(tile(v_t, (1, j_))) + C
# -

# ## Pricing: compute the scenarios for the P&L of each stock by full repricing
# ## scenarios for prices tomorrow

# +
V_u = exp(X_u)

# P&L's scenarios
Pi = V_u - tile(v_t, (1, j_))
# -

# ## Compute HFP-covariance

m_Pi_HFP, s2_Pi_HFP = FPmeancov(Pi, p)
s_Pi_HFP = sqrt(diag(s2_Pi_HFP))

# ## Compute the optimal portfolio with the HFP-covariance of the P&L's

# +
a = m_Pi_HFP - r * v_t  # instruments' excess performance

# compute the inverse of s2_Pi

inv_s2_Pi_HFP = solve(s2_Pi_HFP, eye(s2_Pi_HFP.shape[0]))
# t_HFP = toc

# compute optimal portfolio with HFP covariance
h_star_HFP = a_p * (inv_s2_Pi_HFP @ a) / (a.T @ inv_s2_Pi_HFP @ a)
# -
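
# ## Verify the closed-form portfolio on toy numbers

# a numeric check with hypothetical values (independent of the data above): the
# portfolio h = a_p * inv(s2) @ a / (a.T @ inv(s2) @ a) delivers expected excess
# performance exactly a_p
a_chk = array([[0.02], [0.05]])
s2_chk = array([[0.04, 0.01], [0.01, 0.09]])
a_p_chk = 1000.0
h_chk = a_p_chk * solve(s2_chk, a_chk) / (a_chk.T @ solve(s2_chk, a_chk))
print(h_chk.T @ a_chk)  # equals a_p_chk by construction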
Code example #26
ens = exp(npsum(-p * log(p), 1,
                keepdims=True))  # compute the effective number of scenarios
# -

# ## Detect the worst outlier for each FP profile then compute HFP mean and covariance

t_tilde = zeros(q_, dtype=int)
mu_out = zeros((n_, q_))
sigma2_out = zeros((n_, n_, q_))
for q in range(q_):
    t_tilde[q] = FarthestOutlier(
        epsi, p[[q], :])  # time subscript of the worst outlier
    # compute historical mean and covariance of the dataset without outlier
    epsi_temp = np.delete(epsi, t_tilde[q], axis=1)
    p_temp = np.delete(p[[q], :], t_tilde[q], axis=1)
    [mu_out[:, [q]], sigma2_out[:, :, q]] = FPmeancov(epsi_temp,
                                                      p_temp / npsum(p_temp))

# ## Generate static figures showing how the detected outlier changes along with the FP profile considered

# +
greyrange = arange(0.1, 0.91, 0.01)
date_dt = array([date_mtop(i) for i in date])
myFmt = mdates.DateFormatter('%d-%b-%Y')

t_new = len(date_dt)
epslim1 = [min(epsi[0]) - .3, max(epsi[0]) + .3]
epslim2 = [min(epsi[1]) - .3, max(epsi[1]) + .3]

for q in range(q_):
    f = figure()
Code example #27
delta_t = 1  # fix the unit time-step to 1 day

par_CIR = FitCIR_FP(y[0, -t_obs:], delta_t, None, p2)

kappa = par_CIR[0]
y_ = par_CIR[1]
eta = par_CIR[2]
# -

# ## Estimate mu (drift parameter of X) and rho (correlation between Brownian motions)

# +
dy = diff(y)
xy = r_[dx[-t_obs:].reshape(1, -1), dy[:, -t_obs:]]
[mu_xy, sigma2_xy] = FPmeancov(xy,
                               p2)  # daily mean vector and covariance matrix

mu = mu_xy[0]  # daily mean
rho = sigma2_xy[0, 1] / sqrt(
    sigma2_xy[0, 0] * sigma2_xy[1, 1])  # correlation parameter
# -

# ## Compute analytical variance at horizon tau via characteristic function

# +
omega, x1, x2, x3, x4, x5, x6, x7, tau = symbols(
    'omega x1 x2 x3 x4 x5 x6 x7 tau')

f = HestonChFun(omega / I, x1, x2, x3, x4, x5, x6, x7, tau)
mu1 = sympy.diff(f, omega, 1)
mu2 = sympy.diff(f, omega, 2)
Code example #28
sys.path.append(path.abspath('../../functions-legacy'))

from numpy import array, mean, exp, sqrt

import matplotlib.pyplot as plt

plt.style.use('seaborn')

from FPmeancov import FPmeancov

# parameters
Y1 = array([[1],[0], [- 1]])
Y2 = exp(Y1)  # Y1 and Y2 are co-monotonic
p = array([[1,1,1]]).T / 3
# -

# ## Compute the mean-lower partial moment trade-offs of Y1, Y2 and Y1+Y2.
# ## The expectations of Y1, Y2 and Y1+Y2 are obtained using function
# ## FPmeancov

# +
Y1_ = Y1 - FPmeancov(Y1.T,p)[0]
Y2_ = Y2 - FPmeancov(Y2.T,p)[0]
Y12_ = (Y1 + Y2) - FPmeancov(Y1.T+Y2.T,p)[0]

mlpm_Y1 = mean(Y1) - sqrt(((Y1_ ** 2) * (Y1_ < 0)).T@p)  # mean-lower partial moment trade-off of Y1
mlpm_Y2 = mean(Y2) - sqrt(((Y2_ ** 2) * (Y2_ < 0)).T@p)  # mean-lower partial moment trade-off of Y2
mlpm_Ysum = mlpm_Y1 + mlpm_Y2  # sum of the two mean-lower partial moment trade-offs
mlpm_Y12 = mean(Y1 + Y2) - sqrt((((Y12_) ** 2) * (Y12_ < 0)).T@p)  # mean-lower partial moment trade-off of Y1+Y2
Code example #29

import numpy as np
from numpy import diag, ones, r_, sqrt, zeros
from numpy import sum as npsum
from numpy.linalg import norm, solve
from sklearn.covariance import graph_lasso  # graphical_lasso in recent scikit-learn

from FPmeancov import FPmeancov
# LassoRegFP (code example #1) is assumed importable from the arpmRes
# functions-legacy folder

def RobustLassoFPReg(X,
                     Z,
                     p,
                     nu,
                     tol,
                     lambda_beta=0,
                     lambda_phi=0,
                     flag_rescale=0):
    # Robust Regression - Max-Likelihood with Flexible Probabilities & Shrinkage
    # (multivariate Student t distribution with given degrees of freedom = nu)
    #  INPUTS
    #   X             : [matrix] (n_ x t_end ) historical series of dependent variables
    #   Z             : [matrix] (k_ x t_end) historical series of independent variables
    #   p             : [vector] flexible probabilities
    #   nu            : [scalar] multivariate Student's t degrees of freedom
    #   tol           : [scalar] or [vector] (3 x 1) tolerance, needed to check convergence
    #   lambda_beta  : [scalar] lasso regression parameter
    #   lambda_phi    : [scalar] graphical lasso parameter
    #   flag_rescale  : [boolean flag] if 0 (default), the series is not rescaled
    #
    #  OPS
    #   alpha_RMLFP   : [vector] (n_ x 1) shifting term
    #   beta_RMLFP    : [matrix] (n_ x k_) optimal loadings
    #   sig2_RMLFP    : [matrix] (n_ x n_) covariance matrix of the residuals

    # For details on the exercise, see here .

    ## Code
    [n_, t_] = X.shape
    k_ = Z.shape[0]

    # if FP are not provided, observations are equally weighted
    if p is None:
        p = ones((1, t_)) / t_
    # adjust tolerance input
    if isinstance(tol, float):
        tol = [tol, tol, tol]

    # rescale variables
    if flag_rescale == 1:
        _, cov_Z = FPmeancov(Z, p)
        sig_Z = sqrt(diag(cov_Z))
        _, cov_X = FPmeancov(X, p)
        sig_X = sqrt(diag(cov_X))
        Z = np.diagflat(1 / sig_Z) @ Z
        X = np.diagflat(1 / sig_X) @ X

    # initialize variables
    alpha = zeros((n_, 1))
    beta = zeros((n_, k_, 1))
    sig2 = zeros((n_, n_, 1))

    # 0. Initialize
    alpha[:, [0]], beta[:, :, [0]], sig2[:, :,
                                         [0]], U = LassoRegFP(X, Z, p, 0, 0)

    error = ones(3) * 10**6
    maxIter = 500
    i = 0
    while any(error > tol) and (i < maxIter):
        i = i + 1

        # 1. Update weights
        z2 = np.atleast_2d(U).T @ (solve(sig2[:, :, i - 1], np.atleast_2d(U)))
        w = (nu + n_) / (nu + diag(z2).T)

        # 2. Update FP
        p_tilde = (p * w) / npsum(p * w)

        # 3. Update output
        # Lasso regression
        new_alpha, new_beta, new_sig2, U = LassoRegFP(X, Z, p_tilde,
                                                      lambda_beta)
        new_beta = new_beta.reshape(n_, k_, 1)
        new_sig2 = new_sig2.reshape(n_, n_, 1)
        U = U.squeeze()
        alpha = r_['-1', alpha, new_alpha]
        beta = r_['-1', beta, new_beta]
        sig2 = r_['-1', sig2, new_sig2]
        sig2[:, :, i] = npsum(p * w) * sig2[:, :, i]
        # Graphical lasso
        if lambda_phi != 0:
            sig2[:, :, i], _ = graph_lasso(sig2[:, :, i], lambda_phi)  # returns (covariance, precision)

        # 4. Check convergence
        error[0] = norm(alpha[:, i] - alpha[:, i - 1]) / norm(alpha[:, i - 1])
        error[1] = norm(beta[:, :, i] - beta[:, :, i - 1], ord='fro') / norm(
            beta[:, :, i - 1], ord='fro')
        error[2] = norm(sig2[:, :, i] - sig2[:, :, i - 1], ord='fro') / norm(
            sig2[:, :, i - 1], ord='fro')

    # Output
    alpha_RMLFP = alpha[:, -1]
    beta_RMLFP = beta[:, :, -1]
    sig2_RMLFP = sig2[:, :, -1]

    # From rescaled variables to non-rescaled variables
    if flag_rescale == 1:
        alpha_RMLFP = diag(sig_X) @ alpha_RMLFP
        beta_RMLFP = diag(sig_X) @ beta_RMLFP @ diag(1 / sig_Z)
        sig2_RMLFP = diag(sig_X) @ sig2_RMLFP @ diag(sig_X).T
    return alpha_RMLFP, beta_RMLFP, sig2_RMLFP
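
A hypothetical usage sketch (assumes RobustLassoFPReg and its arpmRes dependencies are importable; the data are synthetic):

import numpy as np

rng = np.random.default_rng(9)
Z = rng.standard_normal((2, 300))
X = np.array([[0.8, -0.3]]) @ Z + 0.1 * rng.standard_normal((1, 300))

# p=None triggers equal weighting inside the function
alpha, beta, sig2 = RobustLassoFPReg(X, Z, None, nu=5, tol=1e-6, lambda_beta=0.01)
print(beta.round(2))  # close to [[0.8, -0.3]]
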
Code example #30
# flexible prob.
lam = log(2) / 120  # half-life of 4 months
flex_prob = exp(-lam * arange(t_, 0, -1)).reshape(1, -1)
flex_prob = flex_prob / npsum(flex_prob)

typ = namedtuple('typ', 'Entropy')
typ.Entropy = 'Exp'
ens = EffectiveScenarios(flex_prob, typ)
# -

# ## Twist fix for non-synchroneity in HFP

# +
print('Performing the twist fix for non-synchroneity')
# (step 1-2) HFP MEAN/COVARIANCE/CORRELATION
HFPmu, HFPcov = FPmeancov(epsi, flex_prob)
HFPc2 = np.diagflat(diag(HFPcov)**(-1 / 2)) @ HFPcov @ np.diagflat(
    diag(HFPcov)**(-1 / 2))

# (step 3) TARGET CORRELATIONS
l = 10  # number of lags

flex_prob_l = flex_prob[[0], l:]
flex_prob_l = flex_prob_l / npsum(flex_prob_l)

# concatenate the daily log-returns
y1, y2 = zeros(t_), zeros(t_)
for t in range(l, t_):
    y1[t] = sum(ret1[0, t - l:t])
    y2[t] = sum(ret2[0, t - l:t])