Example #1
import numpy as np
from numpy import arange, zeros, log, sqrt
from scipy.stats import t
# MaxLikelihoodFPLocDispT is assumed importable from the surrounding codebase


def CalibDegOfFreedomMLFP(x, FP, maxdf, stepdf):
    # Student t model:
    # MLFP estimation of mu and sigma on a grid of degrees of freedom (df);
    # the best fit corresponds to the df that yields the highest log-likelihood L
    #  INPUTS
    # x       :[vector](1 x t_end) empirical realizations
    # FP      :[vector](1 x t_end) flexible probabilities associated with vector x
    # maxdf   :[scalar] maximum value of nu to be checked
    # stepdf  :[scalar] step between consecutive values of nu to be checked
    #  OUTPUTS
    # mu      :[scalar] estimated location parameter
    # sig2    :[scalar] estimated dispersion parameter
    # nu      :[scalar] best degrees of freedom nu

    ## Code
    df = arange(1, maxdf + stepdf, stepdf)
    Tol = 10**(-6)
    l_ = len(df)

    Mu = zeros((l_, 1))
    Sigma2 = zeros((l_, 1))
    L = zeros((l_, 1))
    for i in range(l_):
        # MLFP location/dispersion fit for the current candidate df
        Mu[i], Sigma2[i], _ = MaxLikelihoodFPLocDispT(x, FP, df[i], Tol, 1)
        # FP-weighted log-likelihood of the rescaled Student t pdf
        L[i] = FP @ log(
            t.pdf((x - Mu[i]) / sqrt(Sigma2[i]), df[i]) / sqrt(Sigma2[i])).T

    imax = np.argmax(L)

    mu = Mu[imax]
    sig2 = Sigma2[imax]
    nu = df[imax]

    return mu, sig2, nu
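
# A minimal usage sketch on synthetic data with a flat FP profile (hypothetical
# inputs; assumes the imports above and that MaxLikelihoodFPLocDispT is available):
t_ = 1000
x_sample = t.rvs(5, size=(1, t_))     # synthetic Student t sample
FP_flat = np.ones((1, t_)) / t_       # flat flexible probabilities
mu_hat, sig2_hat, nu_hat = CalibDegOfFreedomMLFP(x_sample, FP_flat, maxdf=10, stepdf=1)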
Example #2
sigma2 = sigma_temp.T@sigma_temp  # covariance matrix of dimension (i_ x i_)

epsi_temp = mvtrvs(zeros(sigma2.shape[0]), sigma2, nu, t_).T

epsi = diagflat(sqrt(diag(sigma2)))@epsi_temp + tile(mu, (1, t_))  # dataset of dimension (i_ x t_end) from a t() distribution
# -

# ## Set the Flexible Probability profile for MLFP estimation (exponential decay with half life 12 months)

lam = log(2) / 360  # decay rate for a 12-month (360-day) half life
p = exp(-lam * arange(t_, 0, -1)).reshape(1, -1)
p = p / npsum(p)
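
# the half life can be read off the weights directly: for any s,
# exp(-lam*(s+360)) / exp(-lam*s) = exp(-log(2)) = 1/2, so an observation
# 360 days older receives half the weight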

# ## Compute MLFP estimators of location and dispersion from the sample

mu_MLFP, sigma2_MLFP, err1 = MaxLikelihoodFPLocDispT(epsi, p, nu, 10**-15, 1)

# ## Define the likelihood function

# +
mvt = mvd.MVT(array([0]), array([[1]]), df=nu)
mvtpdf = mvt.pdf

# theta stacks the location vector (first i_ entries) and the vectorized
# dispersion matrix (next i_*i_ entries, Fortran order); the complex cast keeps
# log and sqrt well-defined when the numerical differentiation perturbs theta
likelihood = lambda theta: npsum(p * np.real(log((mvtpdf((epsi - tile(theta[:i_], (1, t_))).T@diagflat(
    1 / sqrt(reshape(theta[i_:i_*(1 + i_)], (i_, -1), 'F').astype(np.complex128))))).astype(np.complex128).T)))
# -

# ## Compute the Hessian matrix

hessian, err2 = numHess(likelihood, r_[mu_MLFP[...,newaxis],sigma2_MLFP])
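
# A minimal sketch of what a numerical Hessian routine does (numHess comes from
# the surrounding codebase; this central-difference version is an illustrative
# assumption, not its actual implementation):
import numpy as np

def num_hessian(f, x0, h=1e-5):
    # central finite differences of a scalar function f: R^n -> R
    x0 = np.asarray(x0, dtype=float).ravel()
    n = x0.size
    H = np.zeros((n, n))
    for a in range(n):
        for b in range(n):
            e_a = np.zeros(n); e_a[a] = h
            e_b = np.zeros(n); e_b[b] = h
            H[a, b] = (f(x0 + e_a + e_b) - f(x0 + e_a - e_b)
                       - f(x0 - e_a + e_b) + f(x0 - e_a - e_b)) / (4 * h**2)
    return H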
Example #3
# ## Standardize the invariants

# +
nu_marg = 6
tauHL_prior = 252*3  # 3 years
# set FP
p_t = exp(-(log(2) / (tauHL_prior + round(10*(rand() - 0.5)))) * arange(t_, 0, -1)).reshape(1, -1)  # FP profile, used for each invariant separately
p_t = p_t / npsum(p_t)

mu = zeros((bonds_i_, 1))
sig2 = zeros((bonds_i_, 1))
epsi_t = zeros(epsi.shape)
u = zeros(epsi.shape)
for i in range(bonds_i_):
    mu[i], sig2[i],_ = MaxLikelihoodFPLocDispT(epsi[[i],:], p_t, nu_marg, 10 ** -6, 1)
    epsi_t[i, :] = (epsi[i, :] - mu[i]) / sqrt(sig2[i])
    u[i, :] = tstu.cdf(epsi_t[i, :], nu_marg)
# -

# ## Estimate the correlation of the t-copula
# ## map observations into copula realizations

# +
nu = 5
c = zeros(u.shape)
for i in range(bonds_i_):
    c[i, :] = tstu.ppf(u[i, :], nu)

# estimate the covariance of the copula realizations
_, s2_hat = FPmeancov(c, ones((1, t_)) / t_)
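# note: FPmeancov returns a mean and a covariance; if a correlation matrix is
# needed it can be extracted with the same normalization used in the examples
# below, e.g. (sketch):
# c2_hat = np.diagflat(diag(s2_hat)**(-1/2)) @ s2_hat @ np.diagflat(diag(s2_hat)**(-1/2))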
epsi = diff(y, 1, 1)  # daily rate changes

# ## Maximum Likelihood with Flexible Probabilities (MLFP) Student t fit

# +
# degrees of freedom
nu = 5

# flexible probabilities (exponential decay half life 6 months)
lam = log(2) / 180
p = exp(-lam * arange(t_ - 1, 0, -1)).reshape(1, -1)
p = p / npsum(p)

# Fit
tolerance = 10**(-10)
mu_MLFP, sigma2_MLFP, _ = MaxLikelihoodFPLocDispT(epsi, p, nu, tolerance, 1)

# Student t mean and covariance
m_MLFP = mu_MLFP
s2_MLFP = nu / (nu - 2) * sigma2_MLFP
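# note: for a Student t distribution the dispersion sigma2 is not the
# covariance; the covariance nu/(nu-2)*sigma2 above is finite only for nu > 2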
# -

# ## Create figures

# +
CM, C = ColorCodedFP(p, npmin(p), npmax(p), arange(0, 0.8, 0.005), 0, 1,
                     [1, 0])

f = figure()
# colormap(CM)
scatter(epsi[0], epsi[1], 10, c=C, marker='.')
# Flexible Probabilities
p = ConditionalFP(conditioner, prior)
# ## Fit the t copula
# ## estimate marginal distributions by fitting a Student t distribution via
# ## MLFP and recover the invariants' grades
u = zeros((i_, t_))
epsi = sort(epsi, 1)  # sort scenarios in ascending order (needed to apply CopMargComb later)
for i in range(i_):
    mu_nu = zeros(nu_)
    sig2_nu = zeros(nu_)
    like_nu = zeros(nu_)
    for k in range(nu_):
        nu_k = nu_vec[k]
        mu_nu[k], sig2_nu[k], _ = MaxLikelihoodFPLocDispT(
            epsi[[i], :], p, nu_k, 10**-6, 1)
        epsi_t = (epsi[[i], :] - mu_nu[k]) / sqrt(sig2_nu[k])
        like_nu[k] = npsum(
            p * log(t.pdf(epsi_t, nu_k) / sqrt(sig2_nu[k])))  # log-likelihood
    # take as estimates the parameters giving rise to the highest log-likelihood
    j_nu = argsort(like_nu)[::-1]
    nu_marg = max(nu_vec[j_nu[0]], 10)
    mu_marg = mu_nu[j_nu[0]]
    sig2_marg = sig2_nu[j_nu[0]]
    u[i, :] = t.cdf((epsi[i, :] - mu_marg) / sqrt(sig2_marg), nu_marg)
# Map the grades into standard Student t realizations
epsi_tilde = zeros((i_, t_))
for i in range(i_):
    epsi_tilde[i, :] = t.ppf(u[i, :], nu)

# fit the ellipsoid via MLFP
nu_MLFP = zeros((1, i_))
mu_MLFP = zeros((1, i_))
sig2_MLFP = zeros((1, i_))

# flexible probabilities
lam = log(2) / tau_HL
p = exp(-lam * arange(t_, 0, -1)).reshape(1, -1)
p = p / npsum(p)

# estimate marginal distributions
for i in range(i_):

    mu_nu = zeros((1, nu_))
    sig2_nu = zeros((1, nu_))
    like_nu = zeros((1, nu_))
    for j in range(nu_):
        nu = nu_vec[j]
        mu_nu[0, j], sig2_nu[0, j], _ = MaxLikelihoodFPLocDispT(
            epsi[[i], :], p, nu, 10**-6, 1)
        epsi_t = (epsi[i, :] - mu_nu[0, j]) / sqrt(sig2_nu[0, j])
        like_nu[0, j] = sum(
            p[0] *
            log(t.pdf(epsi_t, nu) / sqrt(sig2_nu[0, j])))  # Log-likelihood

    # take as estimates the ones giving rise to the highest log-likelihood
    j_nu = argsort(like_nu[0])[::-1]
    nu_MLFP[0, i] = nu_vec[j_nu[0]]
    mu_MLFP[0, i] = mu_nu[0, j_nu[0]]
    sig2_MLFP[0, i] = sig2_nu[0, j_nu[0]]
# -

# ## Recover the time series of standardized uniform variables

u = zeros((i_, t_))
Example #7
q1 = percentile(epsi, 25, axis=1, keepdims=True)
q2 = percentile(epsi, 75, axis=1, keepdims=True)
interq_range = q2 - q1
epsi_rescaled = epsi / tile(interq_range, (1, t_obs))

# STEP 1: Invariants grades
epsi_grid, u_grid, grades = CopMargSep(epsi_rescaled, p)
nu = 4

# STEP 2: Marginal t
epsi_st = zeros(epsi.shape)
for i in range(i_):
    epsi_st[i, :] = t.ppf(grades[i, :], nu)

# STEP 3: Fit ellipsoid (MLFP ellipsoid under Student t assumption)
Tol = 10**-6
mu_epsi, sigma2_epsi, _ = MaxLikelihoodFPLocDispT(epsi_st, p, nu, Tol, 1)

# STEP 4: Shrinkage (we don't shrink sigma2)

# STEP 5: Correlation
c2_hat = np.diagflat(diag(sigma2_epsi)**(-1 / 2)) @ sigma2_epsi @ np.diagflat(
    diag(sigma2_epsi)**(-1 / 2))

# Rescale the invariants back to their original size
epsi_grid = epsi_grid * tile(interq_range, (1, t_obs))
# -

# ## Marginal distributions: HFP distributions for epsi_HST and epsi_MVOU; parametric VG distribution for epsi_VG

# +
marginals_grid = r_[epsi_grid[:4, :], shifted_epsi_grid_vg.reshape(1, -1)]
Example #8
conditioner.TargetValue = np.atleast_2d(z_vix_star)
conditioner.Leeway = alpha
p = ConditionalFP(conditioner, prior)

# ## Estimate the marginal distributions

nu_marg_SPX = zeros(i_)
mu_marg_SPX = zeros(i_)
sig2_marg_SPX = zeros(i_)
for i in range(i_):
    mu_nu = zeros(nu_)
    sig2_nu = zeros(nu_)
    like_nu = zeros(nu_)
    for k in range(nu_):
        nu = nu_vec[k]
        mu_nu[k], sig2_nu[k], _ = MaxLikelihoodFPLocDispT(
            epsi_SPX[[i], :], p, nu, 10**-6, 1)
        epsi_t = (epsi_SPX[i, :] - mu_nu[k]) / sqrt(sig2_nu[k])
        like_nu[k] = npsum(p * log(tstu.pdf(epsi_t, nu) / sqrt(sig2_nu[k])))

    k_nu = argsort(like_nu)[::-1]
    nu_marg_SPX[i] = max(nu_vec[k_nu[0]], 10)
    mu_marg_SPX[i] = mu_nu[k_nu[0]]
    sig2_marg_SPX[i] = sig2_nu[k_nu[0]]

# ## Compute the historical distribution of the invariants' copula

u_SPX = zeros((i_, t_))
for i in range(i_):
    u_SPX[i, :] = tstu.cdf(
        (epsi_SPX[i, :] - mu_marg_SPX[i]) / sqrt(sig2_marg_SPX[i]),
        nu_marg_SPX[i])
conditioner.TargetValue = np.atleast_2d(z_vix_star)
conditioner.Leeway = alpha

p = ConditionalFP(conditioner, prior)

# marginal distribution fit
nu_marg = zeros(i_)
mu_marg = zeros(i_)
sig2_marg = zeros(i_)
for i in range(i_):
    mu_nu = zeros(nu_)
    sig2_nu = zeros(nu_)
    like_nu = zeros(nu_)
    for k in range(nu_):
        nu = nu_vec[k]
        mu_nu[k], sig2_nu[k], _ = MaxLikelihoodFPLocDispT(
            epsi_stocks[[i], :], p, nu, 10**-6, 1)
        epsi_t = (epsi_stocks[i, :] - mu_nu[k]) / sqrt(sig2_nu[k])
        like_nu[k] = npsum(p * log(tstu.pdf(epsi_t, nu) / sqrt(sig2_nu[k])))

    k_nu = argsort(like_nu)[::-1]
    nu_marg[i] = max(nu_vec[k_nu[0]], 10)
    mu_marg[i] = mu_nu[k_nu[0]]
    sig2_marg[i] = sig2_nu[k_nu[0]]

# Realized marginals mapping into standard Student t realizations
u_stocks = zeros((i_, t_))
epsi_tilde_stocks = zeros((i_, t_))
for i in range(i_):
    # alternatively, clip the grades away from 1, e.g.
    # u_stocks[i, :] = np.minimum(tstu.cdf((epsi_stocks[i, :] - mu_marg[i]) / sqrt(sig2_marg[i]), nu_marg[i]), 0.999)
    u_stocks[i, :] = tstu.cdf(
        (epsi_stocks[i, :] - mu_marg[i]) / sqrt(sig2_marg[i]), nu_marg[i])
Example #10
                            StocksSPX.Dividends[25])  # Cisco Systems Inc
x = x[[0], 1:]
t_ = x.shape[1]
# -

# ## Set the Flexible Probabilities

lam = log(2) / 800
p = exp(-lam * arange(t_, 0, -1)).reshape(1, -1)
p = p / npsum(p)  # FP-profile: exponential decay

# ## Fit the data to a Cauchy distribution

tol = 10**-6
nu = 1  # a Cauchy distribution is a Student t with nu = 1
mu, sigma2, _ = MaxLikelihoodFPLocDispT(dx, p, nu, tol, 1)
sigma = sqrt(sigma2)  # for a Cauchy, the quartiles sit at mu +/- sigma, so sigma is half the interquartile range
mu = mu.squeeze()
sigma2 = sigma2.squeeze()
sigma = sigma.squeeze()

# ## Initialize projection variables

tau = 10
dt = 1 / 20
t_j = arange(0, tau + dt, dt)
j_ = 15

# ## Simulate paths

X = PathsCauchy(x[0, t_ - 1], mu, sigma, t_j, j_)
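
# A minimal sketch of a Cauchy path simulator (PathsCauchy comes from the
# surrounding codebase; this version is an illustrative assumption, not its
# actual implementation). It uses 1-stability: a Cauchy increment over a step
# dt has location mu*dt and scale sigma*dt.
import numpy as np
from scipy.stats import cauchy

def paths_cauchy(x0, mu, sigma, t_j, j_):
    # simulate j_ paths on the time grid t_j, starting from x0
    X = np.zeros((j_, len(t_j)))
    X[:, 0] = x0
    for k, dt in enumerate(np.diff(t_j)):
        X[:, k + 1] = X[:, k] + cauchy.rvs(loc=mu * dt, scale=sigma * dt, size=j_)
    return X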
Example #11
conditioner.Series = z_vix_cond.reshape(1, -1)
conditioner.TargetValue = np.atleast_2d(z_vix_star)
conditioner.Leeway = alpha
p = ConditionalFP(conditioner, prior)

# map invariants into standard Student t realizations
nu_marg = r_[nu_marg, nu_marg_SPX]
mu_marg = r_[mu_marg, mu_marg_SPX]
sig2_marg = r_[sig2_marg, sig2_marg_SPX]
epsi_tilde = zeros((i_, t_))
for i in range(i_):
    u = t.cdf((epsi[i, :] - mu_marg[i]) / sqrt(sig2_marg[i]), nu_marg[i])
    epsi_tilde[i, :] = t.ppf(u, nu_joint)

# estimate joint correlation
_, sig2, _ = MaxLikelihoodFPLocDispT(epsi_tilde, p, nu_joint, 10**-6, 1)
c = np.diagflat(diag(sig2)**(-1 / 2)) @ sig2 @ np.diagflat(
    diag(sig2)**(-1 / 2))

# replace the correlation block related to stocks with its low-rank-diagonal
# approximation
c_stocks, beta_stocks, *_ = FactorAnalysis(
    c[i_SPX:i_SPX + i_stocks, i_SPX:i_SPX + i_stocks], array([[0]]), k_)
c_stocks, beta_stocks = np.real(c_stocks), np.real(beta_stocks)
c_SPX_stocks = c[:i_SPX, i_SPX:i_SPX + i_stocks]
c_SPX = c[:i_SPX, :i_SPX]
# -

# ## Perform Hybrid Monte-Carlo historical projection on the grades for each node path

Epsistocks_tilde_hor = zeros((i_stocks, U_stocks_hor.shape[2]))
Example #12
sigma2 = 2  # squared dispersion parameter
sigma = sqrt(sigma2)
threshold = 1e-4
last = 1
# -

# ## Generate the observations of a Student t with 3 degrees of freedom, location parameter 0 and dispersion parameter 2

Epsi_std = t.rvs(nu, size=(1, t_))
Epsi = mu + sigma * Epsi_std  # Affine equivariance property
x = linspace(npmin(Epsi_std), npmax(Epsi_std), t_ + 1)

# ## Compute the Maximum Likelihood location and dispersion parameters

p = (1 / t_) * ones((1, t_))  # probabilities
mu_ML, sigma2_ML, _ = MaxLikelihoodFPLocDispT(Epsi, p, nu, threshold, last)

# ## Compute the Maximum Likelihood pdf

sigma_ML = sqrt(sigma2_ML)
fML_eps = t.pdf((x - mu_ML) / sigma_ML, nu) / sigma_ML  # change of variables: divide by sigma

# ## Compute the Maximum Likelihood cdf

FML_eps = t.cdf((x - mu_ML) / sigma_ML, nu)

# ## Compute the true pdf

f_eps = t.pdf((x - mu) / sigma, nu) / sigma

# ## Compute the true cdf
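
F_eps = t.cdf((x - mu) / sigma, nu)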