from numpy import sqrt, diag, exp, pi, prod, squeeze
from numpy.linalg import det, solve
from scipy.stats import norm


def NormalCopulaPdf(u, mu, sigma2):
    # This function computes the pdf of the copula of a multivariate normal
    # distribution at a generic point u in the unit hypercube
    #  INPUTS
    #   u      : [vector] (n_ x 1) point in the unit hypercube
    #   mu     : [vector] (n_ x 1) vector of expectations
    #   sigma2 : [matrix] (n_ x n_) symmetric and positive covariance matrix
    #  OPS
    #   f_U    : [scalar] pdf of the copula at u

    # For details on the exercise, see here.

    ## Code

    # Compute the inverse marginal cdf's
    sigvec = sqrt(diag(sigma2))
    x = norm.ppf(u.flatten(), mu.flatten(), sigvec).reshape(-1, 1)

    # Compute the joint pdf
    n_ = len(u)
    f_X = (2*pi)**(-n_ / 2)*(det(sigma2)**(-.5))*exp(-0.5*(x - mu).T@solve(sigma2, x - mu))

    # Compute the marginal pdf's
    f_Xn = norm.pdf(x.flatten(), mu.flatten(), sigvec)

    # Compute the pdf of the copula
    f_U = squeeze(f_X / prod(f_Xn))
    return f_U

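A minimal usage sketch, assuming NormalCopulaPdf is in scope; the two-dimensional parameters below are made-up illustrative values, not taken from the exercise.

import numpy as np

# hypothetical 2-d parameters
mu = np.array([[0.1], [0.2]])
sigma2 = np.array([[1.0, 0.5],
                   [0.5, 2.0]])
u = np.array([[0.3], [0.7]])  # point in the unit square

f_U = NormalCopulaPdf(u, mu, sigma2)  # scalar copula pdf at u
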
import numpy as np
from numpy import zeros, array, sort
from scipy.stats import norm
import ghalton as gh


def __c_1_lambda_quasi(_lambda, dim, ecdf):
    # Accumulate into ecdf the empirical cdf of the largest order statistic
    # of the first coordinate of _lambda quasi-Gaussian samples.
    # Assumes the module-level globals n_sample, n_sample_per_cycle and
    # x_point (the cdf evaluation grid) are defined by the caller.
    seq = gh.Halton(int(dim))  # for memory concern
    proj_max = zeros(n_sample_per_cycle)
    for i in range(n_sample):
        # Halton sequences and quasi-Gaussians
        halton_samples = seq.get(int(_lambda))
        quasi_samples = array([[norm.ppf(halton_samples[k][m]) for m in range(dim)]
                               for k in range(_lambda)]).T
        # projection onto e1
        proj = quasi_samples[0, :]
        # the largest order statistic
        proj_sorted = sort(proj)
        proj_max[i % n_sample_per_cycle] = proj_sorted[-1]
        if (i + 1) % n_sample_per_cycle == 0:
            for k, x in enumerate(x_point):
                ecdf[k] += np.sum(proj_max <= x)

from numpy import sqrt, arange
from numpy import min as npmin, max as npmax
from scipy.stats import norm
from scipy.interpolate import interp1d


def Delta2MoneynessImplVol(sigma_delta, delta, tau, y, m_grid=None):
    # This function, given the implied volatility as a function of
    # delta-moneyness for a fixed time to maturity, computes the implied
    # volatility as a function of m-moneyness at the m-moneyness points
    # specified in m_grid.
    #  INPUTS
    #   sigma_delta [vector]: (1 x k_) implied volatility as a function of
    #                                  delta-moneyness
    #   delta       [vector]: (1 x k_) delta-moneyness corresponding to sigma_delta
    #   tau         [scalar]: time to maturity
    #   y           [scalar]: risk-free rate
    #   m_grid      [vector]: (1 x ?) points at which sigma_m is computed
    #                                 (optional: the default value is an
    #                                 equispaced grid with 100 spaces)
    #  OUTPUTS
    #   sigma_m     [vector]: (1 x ?) implied volatility as a function of
    #                                 m-moneyness
    #   m_grid      [vector]: (1 x ?) m-moneyness corresponding to sigma_m

    ## Code

    # from delta-moneyness to m-moneyness
    m_data = norm.ppf(delta)*sigma_delta - (y + sigma_delta**2/2)*sqrt(tau)

    if m_grid is None:
        # default option: equispaced grid with 100 spaces
        n_grid = 100
        m_grid = npmin(m_data) + (npmax(m_data) - npmin(m_data))*arange(n_grid + 1)/n_grid

    # interpolate/extrapolate the implied volatility on the m-moneyness grid
    interp = interp1d(m_data.flatten(), sigma_delta.flatten(), fill_value='extrapolate')
    sigma_m = interp(m_grid.flatten())
    return sigma_m, m_grid

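A hedged usage sketch with a made-up volatility smile; the quotes below are illustrative values, not market data.

import numpy as np

# hypothetical smile quoted on a delta-moneyness grid
delta = np.array([[0.10, 0.25, 0.50, 0.75, 0.90]])
sigma_delta = np.array([[0.25, 0.22, 0.20, 0.21, 0.24]])
tau = 0.5  # six months to maturity
y = 0.02   # risk-free rate

sigma_m, m_grid = Delta2MoneynessImplVol(sigma_delta, delta, tau, y)
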
from collections import namedtuple

import numpy as np
from numpy import tile, sqrt
from scipy.optimize import least_squares
from scipy.stats import norm


def FitSigmaSVIphi(tau, delta, sigma, y, theta_var_ATM, theta_phi_start):
    # Fit the stochastic volatility inspired (SVI) model.
    # This function calibrates the theta_4, theta_5, theta_6 parameters of the
    # SVI model such that the theoretical volatility surface best matches the
    # observed volatility surface. Notice that the theta_1, theta_2, theta_3
    # parameters of the SVI model are a required input.
    # It relies on the functions objective and SigmaSVI, assumed to be defined
    # elsewhere in this codebase.
    #  INPUTS
    #   tau   [vector]: (n_ x 1) times to maturity corresponding to the rows of sigma
    #   delta [vector]: (1 x k_) delta-moneyness corresponding to the columns of sigma
    #   sigma [matrix]: (n_ x k_) observed volatility surface
    #   y     [vector]: (n_ x 1) risk-free rates corresponding to the times to
    #                   maturity in tau
    #   theta_var_ATM   [structure]: SVI parameters theta_1, theta_2, theta_3
    #                                Fields: theta_1, theta_2, theta_3
    #   theta_phi_start [structure]: starting parameters for the fit
    #                                Fields: theta_4, theta_5, theta_6
    #  OUTPUTS
    #   theta_phi   [structure]: SVI parameters theta_4, theta_5, theta_6
    #                            Fields: theta_4, theta_5, theta_6
    #   sigma_model [matrix]:    (n_ x k_) volatility obtained from the SVI model;
    #                            sigma_model[i, j] is the volatility at tau[i]
    #                            and delta[j]

    ## Code
    n_ = len(tau)
    k_ = len(delta)

    # from delta-moneyness to m-moneyness
    m = tile(norm.ppf(delta), (n_, 1)) * sigma - (y + sigma**2 / 2) * tile(
        sqrt(tau[..., np.newaxis]), (1, k_))

    # Estimation
    par_start = [theta_phi_start.theta4, theta_phi_start.theta5,
                 theta_phi_start.theta6]
    res = least_squares(objective, par_start,
                        args=(tau, m, y, theta_var_ATM, sigma),
                        ftol=1e-9, xtol=1e-9, max_nfev=2 * 600)
    p = res.x
    exitFlag = res.status
    resNorm = res.optimality

    theta = namedtuple('theta', ['theta4', 'theta5', 'theta6'])
    theta_phi = theta(p[0], p[1], p[2])

    sigma_model = SigmaSVI(tau, m, y, theta_var_ATM, theta_phi)

    return theta_phi, sigma_model, exitFlag, resNorm

from numpy import arange, sqrt, sign, exp, log, zeros
from scipy.stats import norm, lognorm, t


def DiscretizeNormalizeParam(tau, k_, model, par):
    # This function discretizes the one-step normalized pdf when the
    # distribution is parametrically specified
    #  INPUTS
    #   tau   :[scalar] projection horizon
    #   k_    :[scalar] coarseness level
    #   model :[string] specifies the distribution: shiftedLN, Student t, Uniform
    #   par   :[struct] model parameters
    #  OUTPUTS
    #   xi    :[1 x k_] centers of the bins
    #   f     :[1 x k_] discretized pdf of the invariant
    # Note: the shiftedLN branch relies on ShiftedLNMoments, assumed to be
    # importable from this codebase.

    ## Code

    # grid
    a = -norm.ppf(10**(-15), 0, sqrt(tau))
    h = 2*a/k_
    xi = arange(-a + h, a + h, h)

    # discretized initial pdf (standardized)
    if model == 'shiftedLN':
        m, s, _ = ShiftedLNMoments(par)
        csi = par.c
        mu = par.mu
        sig = sqrt(par.sig2)
        if sign(par.skew) == 1:
            M = (m - csi)/s
            f = 1/h*(lognorm.cdf(xi + h/2 + M, sig, scale=exp(mu - log(s)))
                     - lognorm.cdf(xi - h/2 + M, sig, scale=exp(mu - log(s))))
            # lump the truncated tail mass into the last bin
            f[k_-1] = 1/h*(lognorm.cdf(-a + h/2 + M, sig, scale=exp(mu - log(s)))
                           - lognorm.cdf(-a + M, sig, scale=exp(mu - log(s)))
                           + lognorm.cdf(a + M, sig, scale=exp(mu - log(s)))
                           - lognorm.cdf(a - h/2 + M, sig, scale=exp(mu - log(s))))
        elif sign(par.skew) == -1:
            M = (m + csi)/s
            f = 1/h*(lognorm.cdf(-(xi - h/2 + M), sig, scale=exp(mu - log(s)))
                     - lognorm.cdf(-(xi + h/2 + M), sig, scale=exp(mu - log(s))))
            f[k_-1] = 1/h*(lognorm.cdf(-(-a + M), sig, scale=exp(mu - log(s)))
                           - lognorm.cdf(-(-a + h/2 + M), sig, scale=exp(mu - log(s)))
                           + lognorm.cdf(-(a - h/2 + M), sig, scale=exp(mu - log(s)))
                           - lognorm.cdf(-(a + M), sig, scale=exp(mu - log(s))))
    elif model == 'Student t':
        nu = par
        f = 1/h*(t.cdf(xi + h/2, nu) - t.cdf(xi - h/2, nu))
        f[k_-1] = 1/h*(t.cdf(-a + h/2, nu) - t.cdf(-a, nu)
                       + t.cdf(a, nu) - t.cdf(a - h/2, nu))
    elif model == 'Uniform':
        mu = par.mu
        sigma = par.sigma
        f = zeros(k_)
        f[(xi >= -mu/sigma) & (xi <= (1 - mu)/sigma)] = sigma

    return xi, f

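For instance, the Student t branch can be exercised as below; nu = 7 is an arbitrary illustrative choice, and since f is a discretized pdf the bin masses h*f should sum to approximately one.

xi, f = DiscretizeNormalizeParam(tau=1, k_=100, model='Student t', par=7)
h = xi[1] - xi[0]
print(h * f.sum())  # close to 1
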
from numpy import arange
from scipy.stats import norm


def CornishFisher(mu, sd, sk, c=None):
    # This function computes the Cornish-Fisher approximation (up to the
    # second term) of the quantile function of a generic random variable,
    # given its mean, standard deviation and skewness, for an arbitrary set
    # of confidence levels.
    #  INPUTS
    #   mu : [scalar] mean
    #   sd : [scalar] standard deviation
    #   sk : [scalar] skewness
    #   c  : [vector] (arbitrary length) confidence levels
    #  OPS
    #   q  : [vector] Cornish-Fisher approximation of the quantiles at c

    # For details on the exercise, see here.

    if c is None:
        c = arange(.001, 1, 0.001)

    z = norm.ppf(c)

    # Cornish-Fisher expansion
    q = mu + sd * (z + sk / 6 * (z**2 - 1))
    return q

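A quick sanity check: with zero skewness the expansion reduces to the plain normal quantile.

import numpy as np
from scipy.stats import norm

q = CornishFisher(mu=0, sd=1, sk=0, c=np.array([0.95]))
print(np.isclose(q, norm.ppf(0.95)))  # [ True]
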
from numpy import sqrt, ones
from scipy.stats import norm


def NormInnov(XZ, m, svec, rho):
    # This function computes the innovation of a bivariate normal
    # variable (X,Z)'
    #  INPUTS
    #   XZ   : [matrix] (2 x j_) joint scenarios of (X,Z)'
    #   m    : [vector] (2 x 1) joint expectation
    #   svec : [vector] (2 x 1) standard deviations
    #   rho  : [scalar] correlation
    #  OPS
    #   Psi  : [vector] (1 x j_) joint scenarios of the innovation

    # For details on the exercise, see here.

    ## Code

    # conditional expectation and standard deviation of X given Z
    mu = m[0] + rho * (svec[0] / svec[1]) * (XZ[1, :] - m[1])
    sigma = sqrt(1 - rho**2) * svec[0]
    j_ = XZ.shape[1]

    # map the scenarios of X through the conditional cdf, then back through
    # the standard normal quantile to obtain the innovation
    Psi = norm.cdf(XZ[0, :], mu, sigma * ones((1, j_)))
    Psi = norm.ppf(Psi, 0, 1)
    return Psi

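A minimal sketch, assuming NormInnov is in scope and using simulated scenarios with hypothetical parameters; the resulting Psi scenarios should be approximately standard normal.

import numpy as np

rho = 0.7
m = np.array([0.0, 0.0])
svec = np.array([1.0, 2.0])
s2 = np.array([[svec[0]**2, rho*svec[0]*svec[1]],
               [rho*svec[0]*svec[1], svec[1]**2]])  # covariance of (X, Z)'
XZ = np.random.multivariate_normal(m, s2, size=10000).T  # (2 x j_) scenarios

Psi = NormInnov(XZ, m, svec, rho)
print(Psi.mean(), Psi.std())  # approximately 0 and 1
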
from numpy import array, sort
from scipy.stats import norm


def __c_1_lambda_quasi(_lambda, dim, proj_max, halton_samples):
    # Fill proj_max in place with the largest order statistic of the first
    # coordinate of _lambda quasi-Gaussian samples, obtained by mapping the
    # given Halton samples through the standard normal quantile function.
    # seq = gh.Halton(int(dim))  # for memory concern
    n_sample = len(proj_max)
    for i in range(n_sample):
        # Halton sequences and quasi-Gaussians
        quasi_samples = array([[norm.ppf(halton_samples[k][m]) for m in range(dim)]
                               for k in range(_lambda)]).T
        # projection onto e1
        proj = quasi_samples[0, :]
        # the largest order statistic
        proj_sorted = sort(proj)
        proj_max[i] = proj_sorted[-1]

from numpy import arange, sqrt, cumsum, real
from numpy.fft import fft, ifft
from scipy.stats import norm, t


def ProjectionStudentT(nu, m, s, T):
    ## Perform the horizon projection of a Student t invariant
    #  INPUTS
    #   nu : [scalar] degrees of freedom
    #   m  : [scalar] location parameter
    #   s  : [scalar] scatter parameter
    #   T  : [scalar] multiple of the estimation period to the investment horizon
    #  OPS
    #   x_Hor : [vector] grid of horizon values
    #   f_Hor : [vector] horizon pdf on the grid
    #   F_Hor : [vector] horizon cdf on the grid

    # set up grid
    N = 2**14  # coarseness level
    a = -norm.ppf(10**(-15), 0, sqrt(T))
    h = 2 * a / N
    Xi = arange(-a + h, a + h, h)

    # discretized initial pdf (standardized)
    f = 1/h*(t.cdf(Xi + h/2, nu) - t.cdf(Xi - h/2, nu))
    f[N-1] = 1/h*(t.cdf(-a + h/2, nu) - t.cdf(-a, nu)
                  + t.cdf(a, nu) - t.cdf(a - h/2, nu))

    # discretized characteristic function
    Phi = fft(f)

    # projection of the discretized characteristic function
    Signs = (-1)**(arange(0, N)*(T - 1))
    Phi_T = h**(T - 1)*Signs*(Phi**T)

    # horizon discretized pdf (standardized); the imaginary part is
    # numerical round-off from the FFT
    f_T = real(ifft(Phi_T))

    # horizon discretized pdf and cdf (non-standardized)
    x_Hor = m*T + s*Xi
    f_Hor = f_T / s
    F_Hor = h*cumsum(f_Hor*s)

    return x_Hor, f_Hor, F_Hor

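An illustrative call, with arbitrary parameter values: project a standardized Student t invariant to a 20-period horizon and check that the discretized cdf reaches one.

x_Hor, f_Hor, F_Hor = ProjectionStudentT(nu=5, m=0, s=1, T=20)
print(F_Hor[-1])  # close to 1, since f_Hor is a discretized pdf
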
from numpy import zeros, array, sort, inf
from scipy.stats import norm, gaussian_kde
from scipy.integrate import quad
import ghalton as gh


def _c_1_lambda_quasi(_lambda, dim):
    """
    Numerical computation of the progress coefficient c(1, lambda)
    """
    seq = gh.Halton(int(dim))  # for memory concern
    n_sample = int(1e3)
    n_trial = int(1e1 * n_sample)
    proj_max_quasi = zeros(n_trial)
    for i in range(n_trial):
        # Halton sequences and quasi-Gaussians
        halton_samples = seq.get(int(_lambda))
        quasi_samples = array([[norm.ppf(halton_samples[k][m]) for m in range(dim)]
                               for k in range(_lambda)]).T
        # projection onto e1
        proj_quasi = quasi_samples[0, :]
        # the largest order statistic
        proj_sorted_quasi = sort(proj_quasi)
        proj_max_quasi[i] = proj_sorted_quasi[-1]

    # kernel-density estimate of the pdf of the largest order statistic,
    # then its expectation by numerical integration
    epdf_quasi = gaussian_kde(proj_max_quasi)
    integrand = lambda x: x * epdf_quasi(x)[0]  # kde returns a length-1 array
    E, err = quad(integrand, 0, inf)
    return E

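For example, with lambda = 10 and dimension 5 (arbitrary illustrative values, and requiring the ghalton package imported above), the estimate should be close to the expected maximum of 10 i.i.d. standard normals, roughly 1.54.

E = _c_1_lambda_quasi(10, 5)
print(E)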