def optimal_portfolio(returns):
    n = len(returns)
    returns = np.asmatrix(returns)

    N = 100
    mus = [10**(5.0 * t / N - 1.0) for t in range(N)]

    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))
    pbar = opt.matrix(np.mean(returns, axis=1))

    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n, 1))
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)

    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus]

    # CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios]

    # CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])

    # CALCULATE THE OPTIMAL PORTFOLIO
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks
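# A minimal usage sketch for optimal_portfolio above, assuming the aliases
# np, opt, blas and solvers that these snippets rely on. The return data is
# synthetic and only for illustration; any (n_assets x n_observations)
# array of returns can be passed in the same way.
import numpy as np
import cvxopt as opt
from cvxopt import blas, solvers

solvers.options['show_progress'] = False  # silence the QP solver log

np.random.seed(0)
n_assets, n_obs = 4, 1000
# Hypothetical daily returns: noise plus a small asset-specific drift,
# which keeps the fitted frontier polynomial well behaved.
drift = np.array([[0.0010], [0.0005], [0.0008], [0.0002]])
sample_returns = 0.01 * np.random.randn(n_assets, n_obs) + drift

weights, frontier_returns, frontier_risks = optimal_portfolio(sample_returns)
print("weights: %s (sum %.3f)" % (weights.ravel(), weights.sum()))  # long-only, sums to 1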
def optimal_portfolio_frontier_conditioned(avg_returns_vec, covariance, n_points_frontier=100): # returns is a n_assets x k_returns_observations matrix # covariance is a n_assets x n_assets matrix, the covariance matrix of returns matrix # credits to https://plotly.com/python/v3/ipython-notebooks/markowitz-portfolio-optimization/ n = len(avg_returns_vec) returns = np.asmatrix(avg_returns_vec) N = n_points_frontier mus = [10 ** (5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices # S = opt.matrix(np.cov(returns)) S = opt.matrix(covariance) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] # CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] # CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), returns, risks
def optimal_portfolios(returns): n = len(returns) returns = np.asmatrix(returns) N = 100000 # Creating a list of returns to optimize the risk for mus = [100**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] ## Calculate the risk and returns of the frontier returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] return returns, risks
def solve_mvo(Sig, R, bounds, G, resolution=2000, points=100): n = len(Sig) S = matrix(np.asarray(Sig)) pbar = matrix(np.asarray(R)) G = matrix(G) h = matrix(bounds.flatten()) A = matrix(1.0, (1, n)) b = matrix(1.0) N = resolution mus = [10**(10.0 * t / N - 1.0) for t in reversed(range(N))] options['show_progress'] = False xs = [] risks = [] returns = [] last_insert = -999999999.9 min_delta = 0.0001 for mu in mus: x = qp(mu * S, -pbar, G, h, A, b)['x'] risk = np.sqrt(dot(x, S * x)) if abs(risk - last_insert) > min_delta: xs.append(np.array(x).flatten()) returns.append(dot(pbar, x)) risks.append(risk) last_insert = risk xs = np.array(xs) sigmas = np.linspace(min(risks), max(risks), points) er = np.interp(sigmas, risks, returns) f = interp1d(np.array(risks), xs.T) ports = f(sigmas).T return sigmas, er, ports
def optimal_portfolio(returns, N, covs): n = len(returns) returns = np.asmatrix(returns).astype(np.double) # N = 100 mus = [10.**(5.0 * t/N - 1.0) for t in range(N)] # Convert to cvxopt matrices cov = covs.numpy().astype(np.double) S = opt.matrix(cov) pbar = opt.matrix(returns.reshape(-1,1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n ,1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming solvers.options['show_progress'] = False portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) # print(m1[2]) # print(m1[0]) # x1 = np.sqrt(m1[2] / m1[0]) x1=0 # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), returns, risks
def optimal_portfolio(returns, risk_level): n = len(returns) returns = np.asmatrix(returns) N = 100 mus = [10**(5.0 * t/N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n ,1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] risks_copy = risks return
def optimal_portfolio(results): n = len(results) results = np.asmatrix(results) N = 100 mus = [10**(5.0 * t/N - 1.0) for t in range(N)] S = opt.matrix(np.cov(results)) pbar = opt.matrix(np.mean(results, axis=1)) G = -opt.matrix(np.eye(n)) h = opt.matrix(0.0, (n ,1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] results = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] m1 = np.polyfit(results, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), results, risks
def optimize(self, mu, syms=None): """Optimize porfolio allocation for given mu. http://abel.ee.ucla.edu/cvxopt/userguide/coneprog.html#quadratic-programming :mu: @todo :syms: symbols. default: self.top() :returns: @todo """ if syms is None: syms = self.top() n = len(syms) mvals = [v for v in self.stats.ix[syms]['mean'].values] pbar = matrix(mvals) G = matrix(0.0, (n, n)) G[::n + 1] = -1.0 h = matrix(0.0, (n, 1)) A = matrix(1.0, (1, n)) b = matrix(1.0) C = matrix(self.panel.r[syms].cov().values) options['show_progress'] = False pf = qp(mu * C, -pbar, G, h, A, b) returns = dot(pbar, pf['x']) std = sqrt(dot(pf['x'], C * pf['x'])) return (pf['x'], returns, std)
def optimal_portfolio(returns): ## the number of stock n = len(returns) returns = np.asmatrix(returns) ## different weights N = 100 mus = [10**(5.0 * t/N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n ,1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] ## calculate risks and returns for efficient frontier returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) ## calculate the optimal portfolio wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), returns, risks
def covmean_portfolio(covariances, mean_returns): ''' returns an optimal portfolio given a covariance matrix and matrix of mean returns ''' n = len(mean_returns) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] S = opt.matrix(covariances.values) pbar = opt.matrix(mean_returns) # Create constraint matrices G = -opt.matrix(np.eye(n)) h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] port_list = convert_portfolios(portfolios) ## CALCULATE RISKS AND RETURNS FOR FRONTIER frontier_returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(frontier_returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), frontier_returns, risks, port_list
def EfficientFrontier(returns, pbar): n = len(returns) returns = np.asmatrix(returns) # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) # Number of points to form efficient frontier (A greater N value requries more iterations and thus # assumes a more accurate representation of the true curve) N=25 mus_min=max(min(pbar),0) mus_max=max(pbar) mus_step=(mus_max - mus_min) / (N-1) mus = [mus_min + i*mus_step for i in range(N)] # cvxopt constraints: Gx <= h, Ax <= b G = opt.matrix(np.concatenate((-np.transpose(pbar),-np.identity(n)),0)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios=[] for r_min in mus: h=opt.matrix(np.concatenate((-np.ones((1,1))*r_min,np.zeros((n,1))),0)) sol = solvers.qp(S, -pbar, G, h, A, b)['x'] portfolios.append(sol) # Calculate risks and returns for the frontier returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] return risks, returns
def efficient_frontier_index(return_rate): solvers.options['show_progress'] = False n_asset = len(return_rate) asset_mean = np.mean(return_rate, axis = 1) #print asset_mean cov = np.cov(return_rate) S = matrix(cov) pbar = matrix(asset_mean) G = matrix(0.0, (n_asset, n_asset)) G[::n_asset + 1] = -1.0 h = matrix(0.0, (n_asset, 1)) A = matrix(1.0, (1, n_asset)) b = matrix(1.0) N = 100 mus = [ 10**(5.0*t/N-1.0) for t in range(N) ] portfolios = [ qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ] returns = [ dot(pbar,x) for x in portfolios ] risks = [ sqrt(dot(x, S*x)) for x in portfolios ] return risks, returns, portfolios
def optimal_portfolio(returns, S): n = len(returns) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices pbar = opt.matrix(returns) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming solvers.options['show_progress'] = False portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [] returns = [blas.dot(pbar, x) for x in portfolios] risks = [] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] risks = [(sum(risks) / len(risks)) if math.isnan(x) else x for x in risks] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(abs(m1[2] / m1[0])) # CALCULATE THE OPTIMAL PORTFOLIO solvers.options['show_progress'] = False wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), returns, risks, portfolios
def weight_opt(returns,benchmark, lower = 0, upper = 1, ph=2**7, cov_method='sample', seed = 123): np.random.seed(seed) n_asset, n_sample = returns.shape rets = np.asmatrix(returns) #N = 10 #phs = [2**(t-2) for t in range(N)] # Convert to cvxopt matrices if cov_method == 'sample': Cov = opt.matrix(np.cov(rets,benchmark)) elif cov_method == 'lw': Cov = opt.matrix(LedoitWolf().fit(np.append(np.transpose(rets),benchmark.reshape(n_sample,1), axis=1)).covariance_) else: raise ValueError('cov_method should be in {}'.format({'sample', 'lw'})) S = Cov[:n_asset,:n_asset] r_mean = opt.matrix(np.nanmean(rets, axis=1)) # n*1 Cb = Cov[:n_asset,n_asset] # Create constraint matrices G = opt.matrix(np.append(np.eye(n_asset),-np.eye(n_asset),axis = 0)) # 2n x n identity matrix h = opt.matrix(np.append(upper*np.ones((n_asset,1)),-lower*np.ones((n_asset,1)),axis = 0)) A = opt.matrix(1.0, (1, n_asset)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming x = solvers.qp(ph*S, -ph*Cb-r_mean, G, h, A, b)['x'] #portfolios = [solvers.qp(ph*S, -ph*Cb-r_mean, G, h, A, b)['x'] # for ph in phs] # CALCULATE RISKS AND RETURNS FOR FRONTIER ret = blas.dot(r_mean, x) #[blas.dot(r_mean, x) for x in portfolios] errors = blas.dot(x, S*x)+Cov[n_asset,n_asset]-2*blas.dot(Cb,x) #[blas.dot(x, S*x)+Cov[n_asset,n_asset]-2*blas.dot(Cb,x) for x in portfolios] return np.transpose(np.array(x))[0], ret, errors#, ret_opt, risk_opt
def optimal_portfolio(returns): n = len(returns) returns = np.asmatrix(returns) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] S = opt.matrix( np.cov(returns) ) #S is the covariance matrix. diagonal is the variance of each stock pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] port_list = convert_portfolios(portfolios) returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) wt = solvers.qp( opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] #Is this the tangency portfolio? X1 = slope from origin? return np.asarray(wt), returns, risks, port_list
def mean_variance(returns, cov=None, shrink_means=False): n = len(returns) returns = np.asmatrix(returns) N = 50 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices if cov is None: S = opt.matrix(np.cov(returns)) else: S = opt.matrix(cov) if shrink_means: pbar = opt.matrix(np.ones(cov.shape[0])) else: pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt)
def optimal_portfolio(r): n = len(r) # num = r.shape[1] num = 1 ret = np.asmatrix(r) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix( np.cov(ret)) ## covariance , diagonal is the variance of each stock pbar = opt.matrix(np.mean(ret, axis=1)) # # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER ret = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(ret, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), num * np.array(ret), np.sqrt( 1.0 * num) * np.array(risks), portfolios
def maximum_return_subject_to_target_risk(): N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] P = covs q = avg_ret G = matrix(-np.eye(n)) h = matrix(-np.zeros((n, 1))) # equality constraint Ax = b; captures the constraint sum(x) == 1 A = matrix(1.0, (1, n)) b = matrix(1.0) xs = [solvers.qp(mu * covs, q, G, h, A, b)['x'] for mu in mus] returns = [dot(-q.T, x) for x in xs] risks = [np.sqrt(dot(x.T, covs * x)) for x in xs] try: import pylab except ImportError: pass else: pylab.figure(1, facecolor='w') pylab.plot(risks, returns) pylab.xlabel('standard deviation') pylab.ylabel('expected return') pylab.axis([0, 0.2, 0, 0.15]) pylab.title('Risk-return trade-off curve') pylab.yticks([0.00, 0.05, 0.10, 0.15]) #pylab.show() sol = solvers.qp(P, q, G, h, A, b) print(maximum_return_subject_to_target_risk.__name__) print(sol['x']) print(statistics(sol['x']))
def optimal_portfolio(daily_returns, N, r): # Frontier points #Packages import pandas as pd import sklearn.covariance as skcov import numpy as np import cvxopt as opt from cvxopt import blas, solvers import statsmodels.api as sm huber = sm.robust.scale.Huber() n = len(daily_returns.T) returns = np.asmatrix(daily_returns) mus = [(10**(5.0 * t / N - 1.0) - 10**(-1)) for t in range(N)] #cvxopt matrices S = opt.matrix(skcov.ShrunkCovariance().fit(returns).covariance_) returns_av, scale = huber(returns) pbar = opt.matrix(returns_av) # Constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] # Risk and returns returns = [252 * blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(252 * blas.dot(x, S * x)) for x in portfolios] portfolios = [np.eye(n).dot(portfolios[i])[:, 0] for i in range(N)] returns = np.asarray(returns) risks = np.asarray(risks) sharpe = np.divide((returns - r), risks) portfolios = np.asarray(portfolios) return pd.DataFrame( data=np.column_stack((returns, risks, sharpe, portfolios)), columns=(['Returns', 'SD', 'Sharpe'] + list(daily_returns.columns)))
def optimal_portfolio(returns): n = len(returns) returns = np.asmatrix(returns) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative identitity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming # We have linear component -pbar negative because we do not # want to account for return twice mu and -pbar portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)["x"] for mu in mus] ## CALCULATE RISK AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) ## CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] return np.asarray(wt), returns, risks, portfolios
def checksol(sol, A, B, C = None, d = None, G = None, h = None): """ Check optimality conditions C * x + G' * z + A'(Z) + d = 0 G * x <= h z >= 0, || Z || < = 1 z' * (h - G*x) = 0 tr (Z' * (A(x) + B)) = || A(x) + B ||_*. """ p, q = B.size n = A.size[1] if G is None: G = spmatrix([], [], [], (0, n)) if h is None: h = matrix(0.0, (0, 1)) m = h.size[0] if C is None: C = spmatrix(0.0, [], [], (n,n)) if d is None: d = matrix(0.0, (n, 1)) if sol['status'] is 'optimal': res = +d base.symv(C, sol['x'], res, beta = 1.0) base.gemv(G, sol['z'], res, beta = 1.0, trans = 'T') base.gemv(A, sol['Z'], res, beta = 1.0, trans = 'T') print "Dual residual: %e" %blas.nrm2(res) if m: print "Minimum primal slack (scalar inequalities): %e" \ %min(h - G*sol['x']) print "Minimum dual slack (scalar inequalities): %e" \ %min(sol['z']) if p: s = matrix(0.0, (p,1)) X = matrix(A*sol['x'], (p, q)) + B lapack.gesvd(+X, s) nrmX = sum(s) lapack.gesvd(+sol['Z'], s) nrmZ = max(s) print "Norm of Z: %e" %nrmZ print "Nuclear norm of A(x) + B: %e" %nrmX print "Inner product of Z and A(x) + B: %e" \ %blas.dot(sol['Z'], X) elif sol['status'] is 'primal infeasible': res = matrix(0.0, (n,1)) base.gemv(G, sol['z'], res, beta = 1.0, trans = 'T') print "Dual residual: %e" %blas.nrm2(res) print "h' * z = %e" %blas.dot(h, sol['z']) print "Minimum dual slack (scalar inequalities): %e" \ %min(sol['z']) else: pass
def optimize_portfolio(self):
    optimization = {}
    n = self.n
    S = matrix(2 * self.statistics["covariance"])
    expected_returns = matrix(self.statistics["expected_asset_returns"])
    G, h, A, b = self.optimization_constraint_matrices()
    mu_array = [10**(5.0 * t / 100 - 1.0) for t in range(100)]
    portfolio_weights = [
        solvers.qp(mu * S, -expected_returns, G, h, A, b)["x"]
        for mu in mu_array
    ]
    # Use numpy arrays so that the element-wise Sharpe ratio and the boolean
    # comparisons below are well defined (plain Python lists would raise a
    # TypeError when subtracting the risk-free rate).
    returns = np.array([dot(expected_returns, w) for w in portfolio_weights])
    risk = np.array([np.sqrt(dot(w, S * w)) for w in portfolio_weights])

    # Calculate the portfolio with the greatest "reward-to-risk" ratio, which
    # is Sharpe's ratio. Notice that it is not necessary to specify the risk
    # free rate in the calculation of Sharpe's ratio, as without loss of
    # generality it may be assumed to be zero. In either case, the same
    # portfolio will achieve the maximum. However, since the risk free asset
    # defaults to a Treasury bill, we take no action regarding this observation.
    mu_free = self.risk_free.statistics["expected_return"]
    sharpe_ratio = (returns - mu_free) / risk
    max_sharpe_index = sharpe_ratio == max(sharpe_ratio)
    min_variance_index = risk == min(risk)
    optimization["returns"] = returns
    optimization["risk"] = risk

    # If possible, try to decrease the number of for loops used to extract the
    # optimal weights of the portfolio. At the time of writing this, it seems
    # that the matrix data structure is somewhat bizarre. Therefore, in order
    # to generate the desired numpy array object, so many for loops turned out
    # to be necessary.
    max_sharpe_weights = [
        portfolio_weights[i] for i in range(len(portfolio_weights))
        if max_sharpe_index[i]
    ]
    min_variance_weights = [
        portfolio_weights[i] for i in range(len(portfolio_weights))
        if min_variance_index[i]
    ]
    optimization["max_sharpe_weights"] = np.zeros((n, 1))
    optimization["min_variance_weights"] = np.zeros((n, 1))
    for i in range(len(max_sharpe_weights[0])):
        optimization["max_sharpe_weights"][i] = max_sharpe_weights[0][i]
    for i in range(len(min_variance_weights[0])):
        optimization["min_variance_weights"][i] = min_variance_weights[0][i]
    return optimization
def optimalPortfolio(navReturn, nbr, pre, risk_free, fit_frequency): # harry markoviz optimizer # Convert to cvxopt matricess navReturn = np.asmatrix(navReturn.T.values) if isinstance(pre, pd.DataFrame): S = opt.matrix(np.asarray(pre)) else: S = opt.matrix(np.cov(navReturn)) # S-> covariance matrix pbar = opt.matrix(np.mean(navReturn, axis=1)) # pbar -> expected returns # Create constraint matrices G = -opt.matrix(np.eye(nbr)) # negative nbr x nbr identity matrix h = opt.matrix(0.0, (nbr, 1)) # all weight >= 0 A = opt.matrix(1.0, (1, nbr)) b = opt.matrix(1.0) # weights sum up to 1 # scale desired returns N = 100 mus = [10**(4 * t / N - 2) for t in range(N)] # desired portfolio returns # Calculate [efficient frontier] weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus ] # x-> weighted portfolios| solvers.qp(P,q,G,h,A,b) # CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] r_r = pd.DataFrame({'risks': risks, 'returns': returns}) # FIT THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) # polyfit: x,y,degree # CALCULATE 3 OUTPUT: MAXIMUM SHARPE RATIO/ DEFAULT/ MINIMUM RISK srp = sharpeRatio(r_r, None, risk_free, fit_frequency, False) srp_y = float(srp.loc[srp['sharpeRatio'] == srp['sharpeRatio'].max(), 'returns']) # srp_y = srp[0] if isinstance(srp_y,seri) else srp_y # TODO AVOID MULTIPLE RESULT min_y = -m1[1] / (2 * m1[0]) # -(b/2a) if (m1[2] / m1[0]) < 0: y1 = min_y else: y1 = np.sqrt(m1[2] / m1[0]) port = [] for i in [srp_y, y1, min_y]: op_wt = solvers.qp( opt.matrix(i * S), -pbar, G, h, A, b)['x'] # CALCULATE THE [OPTIMAL PORTFOLIO WEIGHT] with min risk op_return = blas.dot(pbar, op_wt) # return under OPTIMAL PORTFOLIO op_risk = np.sqrt(blas.dot(op_wt, S * op_wt)) # risk under OPTIMAL PORTFOLIO port.append([np.asarray(op_wt), op_return, op_risk]) return port
def find_the_desired_optimum_portfolio(self): n = 4 S = matrix(self.covariance_matrix) print(S) xxx pbar = matrix([.12, .10, .07, .03]) G = matrix(0.0, (n, n)) G[::n + 1] = -1.0 h = matrix(0.0, (n, 1)) A = matrix(1.0, (1, n)) b = matrix(1.0) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] options['show_progress'] = False xs = [qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] returns = [dot(pbar, x) for x in xs] risks = [sqrt(dot(x, S * x)) for x in xs] # epsilon = 0.01 * (max(std_portfolio_points_list) - min(std_portfolio_points_list)) # number_of_portfolio_points = len(expected_return_portfolio_points_list) # risk_tolerance = min(std_portfolio_points_list) + ((self.risk_tolerance_percentage / 100) * (max(std_portfolio_points_list) - min(std_portfolio_points_list))) # need_more_search = True # while need_more_search: # to check whether there exists any point in the region close to risk tolerance # portfolio_optimum_expected_return = -1 * np.inf # for point_index in range(number_of_portfolio_points): # if abs(std_portfolio_points_list[point_index] - (risk_tolerance - 0.5*epsilon)) <= epsilon/2: # # the point has the risk close to risk tolerance (between tolerance-epsilon and tolerance) # if expected_return_portfolio_points_list[point_index] > portfolio_optimum_expected_return: # portfolio_optimum_expected_return = expected_return_portfolio_points_list[point_index] # portfolio_optimum_std = std_portfolio_points_list[point_index] # portfolio_optimum_weights = weights_list[point_index] # need_more_search = False # epsilon += 0.001 # # portfolio_optimum_expected_return_list, portfolio_optimum_std_list, portfolio_optimum_weights_list = [], [], [] # for point_index in range(number_of_portfolio_points): # if (expected_return_portfolio_points_list[point_index] > portfolio_optimum_expected_return) and \ # (std_portfolio_points_list[point_index] < portfolio_optimum_std): # portfolio_optimum_expected_return_temp = expected_return_portfolio_points_list[point_index] # portfolio_optimum_std_temp = std_portfolio_points_list[point_index] # portfolio_optimum_weights_temp = weights_list[point_index] # portfolio_optimum_expected_return_list.append(portfolio_optimum_expected_return_temp) # portfolio_optimum_std_list.append(portfolio_optimum_std_temp) # portfolio_optimum_weights_list.append(portfolio_optimum_weights_temp) # if not (not portfolio_optimum_expected_return_list): # If aa is not empty (If we do not have already the optimum point) # optimum_index = np.argmax(np.array(portfolio_optimum_expected_return_list)) # portfolio_optimum_expected_return = portfolio_optimum_expected_return_list[optimum_index] # portfolio_optimum_std = portfolio_optimum_std_list[optimum_index] # portfolio_optimum_weights = portfolio_optimum_weights_list[optimum_index] return portfolio_optimum_expected_return, portfolio_optimum_std, portfolio_optimum_weights, risk_tolerance
def markowitz_optimizer(self, target, plot_frontier): returns_df = self.daily_returns.copy() portfolio_sz = len(self.stockList) returns = np.asmatrix(returns_df) # Convert to cvxopt matrices S = opt.matrix(np.cov(returns.T)) pbar = opt.matrix(np.mean(returns, axis=0)).T # Create constraint matrices G = -opt.matrix(np.eye(portfolio_sz)) # negative n x n identity matrix h = opt.matrix(0.0, (portfolio_sz ,1)) A = opt.matrix(1.0, (1, portfolio_sz)) b = opt.matrix(1.0) if plot_frontier: N = 100 mus = [10**(5.0 * t/N - 1.0) for t in range(N)] # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) if m1[0] != 0: x1 = np.sqrt(m1[2] / m1[0]) else: x1 = 0. # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] print 'The Optimal Value:\n' print np.asarray(wt) print 'The Corresponding Return:%f\n' %(x1) globalStats = GlobalStats(returns_df) means = globalStats.get_mean() stds = globalStats.get_std() plt.figure() plt.plot(stds, means, 'o') plt.ylabel('mean') plt.xlabel('std') plt.plot(risks, returns, 'y-o') # get the target portfolio target_portfolio = solvers.qp(target * S, -pbar, G, h, A, b)['x'] target_return = blas.dot(pbar,target_portfolio) target_risk = np.sqrt(blas.dot(target_portfolio, S * target_portfolio)) target_portfolio = np.asarray(target_portfolio) self.optimalAllocation = np.array([x[0] for x in target_portfolio])
def optimal_portfolio(returns): n = len(returns) returns = np.asmatrix(returns) N = 100 mus = [10**(5.0 * t / N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) pbar = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n, 1)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus] for x in portfolios: weight_list.append( [round(x[0], 3), round(x[1], 3), round(x[2], 3), round(x[3], 3)]) opt_weight_list.append( [round(x[0], 3), round(x[1], 3), round(x[2], 3), round(x[3], 3)]) #weight_list.append([round(x[0],3), round(x[1],3), round(x[2],3)]) ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] for re in returns: mean_list.append(re) for ri in risks: sigma_list.append(ri) ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = np.polyfit(returns, risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x'] print "-------------------debug-------------------" print x1 return np.asarray(wt), returns, risks
def markowitz(returns, bounds): """Return the optimal tuple (weights strategies, returns, volatilities) acording to Markowitz's portfolio theory. :param returns: List of list of returns for each strategies. :param bounds: List of tuple of the bounds of the weights strategies. For example : [(0,1), (0,1), (0.5,1)].""" n = len(returns) if (len(returns) != len(bounds)): raise TypeError( 'The number of bounds does notmatch the number of strategy. Should have {} bounds' .format(n)) returns = np.asmatrix(returns) N = 100 mus = [10**(5.0 * t / N - 2.0) for t in range(N)] # Convert to cvxopt matrices. S = optz.matrix(np.cov(returns)) pbar = optz.matrix(np.mean(returns, axis=1)) # Create constraint matrices. sgn_bound = [(np.eye(1, n, k=ind)[0] * -1, np.eye(1, n, k=ind)[0] * 1) for ind in range(n)] sgn_bound = [list(i) for j in sgn_bound for i in j] constraint = [ np.sign(ind - 0.5) * bound for strategy in bounds for ind, bound in enumerate(strategy) ] G = optz.matrix(sgn_bound).T h = optz.matrix(constraint) A = optz.matrix(1.0, (1, n)) b = optz.matrix(1.0) # Calculate efficient frontier weights using quadratic programming. portfolios = [] for mu in mus: portfolios.append(solvers.qp(mu * S, -pbar, G, h, A, b)['x']) try: portfolios.append(solvers.qp(mu * S, -pbar, G, h, A, b)['x']) except: pass # CALCULATE RISKS AND RETURNS FOR FRONTIER. optimal_returns = [blas.dot(pbar, x) for x in portfolios] optimal_risks = [np.sqrt(blas.dot(x, S * x)) for x in portfolios] # CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE. m1 = np.polyfit(optimal_returns, optimal_risks, 2) x1 = np.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO. try: optimal_weights = solvers.qp(optz.matrix(x1 * S), -pbar, G, h, A, b)['x'] except: optimal_weights = np.nan optimal_weights = np.asarray(optimal_weights) return optimal_weights, optimal_returns, optimal_risks
def run(self): cvxopt.solvers.options['show_progress'] = self.show_progress self.portfolios = [ qp(float(t) * self.S, -self.pbar, self.G, self.h, self.A, self.b)['x'] for t in self.mus ] self.returns = [dot(self.pbar, x) for x in self.portfolios] self.risks = [sqrt(dot(x, self.S * x)) for x in self.portfolios] self.sharpe = [ (self.returns[i] - self.risk_free_rate) / (self.risks[i] + 1e-4) for i in range(len(self.returns)) ] self.weights = [ dict(zip(self.names, portfolio)) for portfolio in self.portfolios ]
def optimal_portfolio(returns): n = len(returns) returns = numpy.asmatrix(returns) N = 100 mus = [10**(5.0 * t/N - 1.0) for t in range(N)] # Convert to cvxopt matrices S = matrix(numpy.cov(returns)) print S # extra print stmts print "I am opt related" #######FIXME ABOVE ######################### pbar = matrix(numpy.mean(returns, axis=1)) # Create constraint matrices m=[[-1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0],[0.0,-1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0],[0.0,0.0,-1.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0],[0.0,0.0,0.0,-1.0,0.0,0.0,0.0,0.0,1.0,0.0],[0.0,0.0,0.0,0.0,-1.0,0.0,0.0,0.0,0.0,1.0]] G = matrix(m) ab=[0,0,0,0,0,0.3,0.3,0.3,0.3,0.3] h=matrix(ab) # G = -matrix(numpy.eye(n)) # negative n x n identity matrix # h = matrix(0.0, (n ,1)) A = matrix(1.0, (1, n)) b = matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus] ## CALCULATE RISKS AND RETURNS FOR FRONTIER returns = [blas.dot(pbar, x) for x in portfolios] risks = [numpy.sqrt(blas.dot(x, S*x)) for x in portfolios] ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE m1 = numpy.polyfit(returns, risks, 2) x1 = numpy.sqrt(m1[2] / m1[0]) # CALCULATE THE OPTIMAL PORTFOLIO wt = solvers.qp(matrix(x1 * S), -pbar, G, h, A, b)['x'] weights = matrix(wt) printing.options['dformat'] = '%.1f' #rounding up weights to 1 decimal # return numpy.asarray(wt) return weights #changed in matrix """result looks like the follwg-
def dot(X, Y): """ Computes trace product of X and Y. """ assert Y.symb == X.symb, "Symbolic factorization mismatch" assert X.is_factor is False, "cspmatrix factor object" assert Y.is_factor is False, "cspmatrix factor object" snptr = X.symb.snptr sncolptr = X.symb.sncolptr blkptr = X.symb.blkptr val = 0.0 for k in range(X.symb.Nsn): offset = blkptr[k] nk = snptr[k + 1] - snptr[k] ck = sncolptr[k + 1] - sncolptr[k] for j in range(nk): val -= X.blkval[offset + ck * j + j] * Y.blkval[offset + ck * j + j] val += 2.0 * blas.dot(X.blkval, Y.blkval, offsetx=offset + ck * j + j, offsety=offset + ck * j + j, n=ck - j) return val
def f(x, y, z): # z := - W**-T * z z[:n] = -div(z[:n], d1) z[n:2 * n] = -div(z[n:2 * n], d2) z[2 * n:] -= 2.0 * v * (v[0] * z[2 * n] - blas.dot(v[1:], z[2 * n + 1:])) z[2 * n + 1:] *= -1.0 z[2 * n:] /= beta # x := x - G' * W**-1 * z x[:n] -= div(z[:n], d1) - div(z[n:2 * n], d2) + As.T * z[-(m + 1):] x[n:] += div(z[:n], d1) + div(z[n:2 * n], d2) # Solve for x[:n]: # # S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:] x[:n] -= mul(div(d1**2 - d2**2, d1**2 + d2**2), x[n:]) lapack.potrs(S, x) # Solve for x[n:]: # # (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n] x[n:] += mul(d1**-2 - d2**-2, x[:n]) x[n:] = div(x[n:], d1**-2 + d2**-2) # z := z + W^-T * G*x z[:n] += div(x[:n] - x[n:2 * n], d1) z[n:2 * n] += div(-x[:n] - x[n:2 * n], d2) z[2 * n:] += As * x[:n]
def allocation(mu, opts={}): n = 4 G = matrix(0.0, (n,n)) G[::n+1] = -1.0 h = matrix(0.0, (n,1)) A = matrix(1.0, (1,n)) b = matrix(1.0) if opts: options.update(opts) print "mu*S=\n", mu*S print "-pbar=\n", -pbar x = qp(mu*S, -pbar, G, h, A, b)['x'] ret = dot(pbar,x) risk = sqrt(dot(x, S*x)) return x, ret, risk
def lnprob(theta, Deltam, nsne, xi, redshiftterm): A, M, sigma, pv = theta if (A <= 0 or sigma < 0 or pv < 0): return -numpy.inf C = A * numpy.array(xi) numpy.fill_diagonal( C, C.diagonal() + sigma**2 / nsne + (pv * redshiftterm)**2) mterm = Deltam - M C = matrix(C) W = matrix(mterm) try: lapack.posv(C, W, uplo='U') except ArithmeticError: return -np.inf logdetC = 2 * numpy.log(numpy.array(C).diagonal()).sum() lp = -0.5 * (logdetC + blas.dot(matrix(mterm), W)) + cauchy.logpdf( sigma, loc=0.08, scale=0.5) + cauchy.logpdf( pv, loc=0, scale=600 / 3e5) if not numpy.isfinite(lp): return -np.inf return lp
def F(x=None, z=None): if x is None: return 0, matrix(0.0, (n+1,1)) w = exp(A*x) f = dot(c,x) + sum(log(1+w)) grad = c + A.T * div(w, 1+w) if z is None: return matrix(f), grad.T H = A.T * spdiag(div(w,(1+w)**2)) * A return matrix(f), grad.T, z[0]*H
def OptimalWeights(returns, rmin, pbar): n = len(returns) returns = np.asmatrix(returns) # Convert to cvxopt matrices S = opt.matrix(np.cov(returns)) # Input number of iterations for mu (the number of points forming the efficient frontier) N=2 # Iterations of mu are based on historical data mus_min=max(min(pbar), 0) mus_max=max(pbar) mus_step=(mus_max - mus_min) / (N-1) mus = [mus_min + i*mus_step for i in range(N)] # The following is an alternative way of calculating mus # mus = [10**(5.0 * t/N - 1.0) for t in range(N)] # cvxopt parameters: Gx <= h, Ax <= b G = opt.matrix(np.concatenate((-np.transpose(pbar),-np.identity(n)),0)) h=opt.matrix(np.concatenate((-np.ones((1,1))*rmin, np.zeros((n,1))),0)) A = opt.matrix(1.0, (1, n)) b = opt.matrix(1.0) # Calculate efficient frontier weights using quadratic programming portfolios = [solvers.qp(S, -pbar, G, h, A, b)['x'] for mu in mus] # Calculate risks and returns for the minimum return portfolio returns_rmin = [blas.dot(pbar, x) for x in portfolios] risks_rmin = [np.sqrt(blas.dot(x, S*x)) for x in portfolios] # Calculate the 2nd degree polynomial of the frontier curve at the minimum return portfolio m1 = np.polyfit(returns_rmin, risks_rmin, 2) x1 = np.sqrt(m1[2] / m1[0]) # Calculate the Optimal Portfolio h_mod= opt.matrix(np.concatenate((-np.ones((1,1))*x1, np.zeros((n,1))),0)) wt = solvers.qp(S, -pbar, G, h_mod, A, b) sol = opt.matrix(wt['x']) expected_return = [blas.dot(pbar, sol)] expected_std = np.sqrt(sum([blas.dot(sol, S*sol)])) print 'Weights' print list(sol) print 'Expected Vol' print expected_std print 'Expected Ret' print expected_return return list(sol), expected_return, expected_std
def qscal(u, beta, v, inv = False): """ Transforms the vector u as u := beta * (2*v*v' - J) * u if 'inv' is False and as u := (1/beta) * (2*J*v*v'*J - J) * u if 'inv' is True. """ if not inv: tmp = blas.dot(u,v) u[0] *= -1 u += 2 * v * tmp u *= beta else: u[0] *= -1.0 tmp = blas.dot(v,u) u[0] -= 2*v[0] * tmp u[1:] += 2*v[1:] * tmp u /= beta
def _hedge_with_zeroone_constraint(self, normalize=True): ### common set up n = len(self.meanList) PP = matrix(self.covMat) P = self.covMat.copy() UU, ss, VV = np.linalg.svd(P) lamb = min(ss) * 0.99 print lamb P[::n+1] -= lamb P = matrix(P) pbar = matrix(self.meanList) G = matrix(0.0, (n, n)) G[::n+1] = -1.0 key = self.fixedPosition.keys() A = matrix(0.0, (len(key), n)) b = matrix(0.0, (len(key), 1)) for ii in range(len(key)): A[ii, key[ii]] = 1.0 b[ii, 0] = self.fixedPosition[ii] def sum_or_1(x): valu = sum(x) if valu == 0.0: return 1.0 else : return valu NN = 100 mus = [ 10 ** (10.0 * t/NN - 1.0) for t in range(NN)] if normalize: #portfolios = [matrix(np.round(solvers.qp(mu * P, -pbar, G, h, A, matrix(nn*1.0))['x'] * nn) * 1.0 / nn) for mu in mus for nn in range(1, n+1)] portfolios = [matrix(1.0 * (np.round(solvers.qp(mu * P, -pbar + lamb, G, h, A, matrix(nn*1.0))['x']) > 0)) for mu in mus for nn in range(1, n+1)] portfolios = [x / sum_or_1(x) for x in portfolios] else : portfolios = [matrix(1.0 * (np.round(solvers.qp(mu * P, -pbar + lamb, G, h, A, matrix(nn*1.0))['x']) > 0)) for mu in mus for nn in range(1, n+1)] returns = [ dot(pbar, x) for x in portfolios ] risks = [np.sqrt(dot(x, PP * x)) for x in portfolios] idx = np.argmin(np.abs(np.array(risks) - self.targetRisk)) self.optimizedWeights = np.array(portfolios[idx]).T.tolist()[0] self.optimizedMean = returns[idx] self.optimizedRisk = risks[idx]
def _optimize_with_no_constraint(self): ### common set up n = len(self.meanList) P = matrix(self.covMat) pbar = matrix(self.meanList) G = matrix(0.0, (n, n)) h = matrix(0.0, (n, 1)) A = matrix(1.0, (1, n)) b = matrix(1.0) NN = 100 mus = [ 10 ** (10.0 * t/NN - 1.0) for t in range(NN)] portfolios = [solvers.qp(mu * P, -pbar, G, h, A, b)['x'] for mu in mus] returns = [ dot(pbar, x) for x in portfolios ] risks = [np.sqrt(dot(x, P * x)) for x in portfolios] idx = np.argmin(np.abs(np.array(risks) - self.targetRisk)) self.optimizedWeights = np.array(portfolios[idx]).T.tolist()[0] self.optimizedMean = returns[idx] self.optimizedRisk = risks[idx]
def efficient_frontier(mu, Sigma): (row, col) = Sigma.size (dim, one) = mu.size if row != col or dim != col or one != 1: print "portfolio dim error: %d %d %d %d" % (row, col, dim, one) n = row # we don't need to change the below setting G = matrix(0.0, (n,n)) G[::n+1] = -1.0 h = matrix(0.0, (n,1)) # G and h: -x <= 0 i.e. x >= 0 A = matrix(1.0, (1,n)) b = matrix(1.0) # A and b: sum(x) = 1 # Compute trade-off. N = 200 alphas = [ 10**(5.0*t/N-1.0) for t in range(N) ] portfolios = [ qp(alpha * Sigma, -mu, G, h, A, b)['x'] for alpha in alphas ] returns = [ dot(mu, x) for x in portfolios ] risks = [ sqrt(dot(x, Sigma * x)) for x in portfolios ] return (returns, risks)
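# A minimal usage sketch for efficient_frontier above: it expects mu and
# Sigma already as cvxopt matrices, and assumes the names matrix, qp, dot
# and sqrt are imported as below. The 4-asset data mirrors the classic
# example script near the end of this collection (nested lists are columns
# in cvxopt's matrix constructor; Sigma is symmetric, so the order is moot).
from math import sqrt
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt.solvers import qp, options

options['show_progress'] = False

Sigma = matrix([[ 4e-2,  6e-3, -4e-3,   0.0],
                [ 6e-3,  1e-2,   0.0,   0.0],
                [-4e-3,  0.0,  2.5e-3,  0.0],
                [ 0.0,   0.0,   0.0,    0.0]])
mu = matrix([.12, .10, .07, .03])

frontier_returns, frontier_risks = efficient_frontier(mu, Sigma)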
def optimize1(self, mu): """Optimize for a given level of risk :returns: [[Allocations], returns, stddev] """ options['show_progress'] = False try: pf = qp(mu * self.C, -self.pbar, self.G, self.h, self.A, self.b) except Exception as e: print e return None if pf['status'] is "unknown": return None #raise RuntimeError("optimization status unknown") returns = dot(self.pbar, pf['x']) std = sqrt(dot(pf['x'], self.C * pf['x'])) return [pf['x'].T, returns, std]
def f(x, y, z): minor = 0 if not helpers.sp_minor_empty(): minor = helpers.sp_minor_top() else: global loopf loopf += 1 minor = loopf helpers.sp_create("00-f", minor) # z := - W**-T * z z[:n] = -div( z[:n], d1 ) z[n:2*n] = -div( z[n:2*n], d2 ) z[2*n:] -= 2.0*v*( v[0]*z[2*n] - blas.dot(v[1:], z[2*n+1:]) ) z[2*n+1:] *= -1.0 z[2*n:] /= beta # x := x - G' * W**-1 * z x[:n] -= div(z[:n], d1) - div(z[n:2*n], d2) + As.T * z[-(m+1):] x[n:] += div(z[:n], d1) + div(z[n:2*n], d2) helpers.sp_create("15-f", minor) # Solve for x[:n]: # # S*x[:n] = x[:n] - (W1**2 - W2**2)(W1**2 + W2**2)^-1 * x[n:] x[:n] -= mul( div(d1**2 - d2**2, d1**2 + d2**2), x[n:]) helpers.sp_create("25-f", minor) lapack.potrs(S, x) helpers.sp_create("30-f", minor) # Solve for x[n:]: # # (d1**-2 + d2**-2) * x[n:] = x[n:] + (d1**-2 - d2**-2)*x[:n] x[n:] += mul( d1**-2 - d2**-2, x[:n]) helpers.sp_create("35-f", minor) x[n:] = div( x[n:], d1**-2 + d2**-2) helpers.sp_create("40-f", minor) # z := z + W^-T * G*x z[:n] += div( x[:n] - x[n:2*n], d1) helpers.sp_create("44-f", minor) z[n:2*n] += div( -x[:n] - x[n:2*n], d2) helpers.sp_create("48-f", minor) z[2*n:] += As*x[:n] helpers.sp_create("50-f", minor)
def optimal_portfolio_cvxopt(C, R=None, EF=False):
    n = C.shape[0]  # number of assets in the portfolio
    if R is None:
        R = np.ones(n)  # minimum-variance setting

    # Convert to cvxopt matrices.
    # Objective passed to solvers.qp: (1/2) * w' * (mu*S) * w - pbar' * w
    S = opt.matrix(C)
    pbar = opt.matrix(R)

    # Create constraint matrices
    # Gx <= h: every weight is non-negative
    G = -opt.matrix(np.eye(n))  # negative n x n identity matrix
    h = opt.matrix(0.0, (n, 1))
    # Ax = b: all weights sum to 1
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)

    if EF:
        N = 50
        mus = [10**(5.0 * t / N - 1.0) for t in range(N)]
        # Calculate efficient frontier weights using quadratic programming
        portfolios = [solvers.qp(mu * S, -pbar, G, h, A, b)['x'] for mu in mus]
        # CALCULATE RISKS AND RETURNS FOR FRONTIER
        effr_mean = [blas.dot(pbar, x) for x in portfolios]
        effr_var = [np.sqrt(blas.dot(x, S * x)) for x in portfolios]
        effr_weights = [np.asarray(x).ravel() for x in portfolios]
        # CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
        m1 = np.polyfit(effr_mean, effr_var, 2)
        x1 = np.sqrt(m1[2] / m1[0])
        # CALCULATE THE OPTIMAL PORTFOLIO (computed here but not returned)
        wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
        return effr_mean, effr_var, effr_weights
    else:
        wt = solvers.qp(opt.matrix(S), -pbar, G, h, A, b)['x']
        return np.asarray(wt).ravel()
def G(x, y, alpha = 1.0, beta = 0.0, trans = 'N'): """ Implements the linear operator [ -DX E -d -I ] [ 0 0 0 -I ] [ 0 -e_1' 0 0 ] G = [ -P_1' 0 0 0 ] [ . . . . ] [ 0 -e_k' 0 0 ] [ -P_k' 0 0 0 ] and its adjoint G'. """ if trans == 'N': tmp = +y[:m] # y[:m] = alpha*(-DXw + Et - d*b - v) + beta*y[:m] base.gemv(E, x[n:n+k], tmp, alpha = alpha, beta = beta) blas.axpy(x[n+k+1:], tmp, alpha = -alpha) blas.axpy(d, tmp, alpha = -alpha*x[n+k]) y[:m] = tmp base.gemv(X, x[:n], tmp, alpha = alpha, beta = 0.0) tmp = mul(d,tmp) y[:m] -= tmp # y[m:2*m] = -v y[m:2*m] = -alpha * x[n+k+1:] + beta * y[m:2*m] # SOC 1,...,k for i in range(k): l = 2*m+i*(n+1) y[l] = -alpha * x[n+i] + beta * y[l] y[l+1:l+1+n] = -alpha * P[i] * x[:n] + beta * y[l+1:l+1+n]; else: tmp1 = mul(d,x[:m]) tmp2 = y[:n] blas.gemv(X, tmp1, tmp2, trans = 'T', alpha = -alpha, beta = beta) for i in range(k): l = 2*m+1+i*(n+1) blas.gemv(P[i], x[l:l+n], tmp2, trans = 'T', alpha = -alpha, beta = 1.0) y[:n] = tmp2 tmp2 = y[n:n+k] base.gemv(E, x[:m], tmp2, trans = 'T', alpha = alpha, beta = beta) blas.axpy(x[2*m:2*m+k*(1+n):n+1], tmp2, alpha = -alpha) y[n:n+k] = tmp2 y[n+k] = -alpha * blas.dot(d,x[:m]) + beta * y[n+k] y[n+k+1:] = -alpha * (x[:m] + x[m:2*m]) + beta * y[n+k+1:]
def getlowerexpectation(self,obj): """Compute the lower expectation of the given function :param obj: values of the function whose lower expectation is to be computed :param type: :class:`~numpy.array` :return: the lower expectation value :rtype: float """ if(CredalSet.issubsetmask(obj)): return self.getlowerprobability(obj) else: solution=self.solvelowerexpectation(obj) return dot(solution['x'],matrix(obj))
def _hedge_with_positive_constraint(self): ### common set up n = len(self.meanList) P = matrix(self.covMat) pbar = matrix(self.meanList) G = matrix(0.0, (n, n)) G[::n+1] = -1.0 h = matrix(0.0, (n, 1)) key = self.fixedPosition.keys() A = matrix(0.0, (len(key), n)) b = matrix(0.0, (len(key), 1)) for ii in range(len(key)): A[ii, key[ii]] = 1.0 b[ii, 0] = self.fixedPosition[ii] NN = 100 mus = [ 10 ** (10.0 * t/NN - 1.0) for t in range(NN)] portfolios = [solvers.qp(mu * P, -pbar, G, h, A, b)['x'] for mu in mus] returns = [ dot(pbar, x) for x in portfolios ] risks = [np.sqrt(dot(x, P * x)) for x in portfolios] idx = np.argmin(np.abs(np.array(risks) - self.targetRisk)) self.optimizedWeights = np.array(portfolios[idx]).T.tolist()[0] self.optimizedMean = returns[idx] self.optimizedRisk = risks[idx]
def acent(A, b): """ Computes analytic center of A*x <= b with A m by n of rank n. We assume that b > 0 and the feasible set is bounded. """ MAXITERS = 100 ALPHA = 0.01 BETA = 0.5 TOL = 1e-8 ntdecrs = [] m, n = A.size x = matrix(0.0, (n, 1)) H = matrix(0.0, (n, n)) for iter in range(MAXITERS): # Gradient is g = A^T * (1./(b-A*x)). d = (b - A * x) ** -1 g = A.T * d # Hessian is H = A^T * diag(1./(b-A*x))^2 * A. Asc = mul(d[:, n * [0]], A) blas.syrk(Asc, H, trans="T") # Newton step is v = H^-1 * g. v = -g lapack.posv(H, v) # Directional derivative and Newton decrement. lam = blas.dot(g, v) ntdecrs += [sqrt(-lam)] print("%2d. Newton decr. = %3.3e" % (iter, ntdecrs[-1])) if ntdecrs[-1] < TOL: return x, ntdecrs # Backtracking line search. y = mul(A * v, d) step = 1.0 while 1 - step * max(y) < 0: step *= BETA while True: if -sum(log(1 - step * y)) < ALPHA * step * lam: break step *= BETA x += step * v
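# A small usage sketch for acent above. The problem data is synthetic:
# b > 0 makes x = 0 strictly feasible, and a tall random A makes the
# polyhedron {x : A*x <= b} bounded with high probability, which is what
# the function assumes.
from cvxopt import normal, uniform

m, n = 200, 10
A = normal(m, n)              # random Gaussian constraint matrix
b = uniform(m, 1, 1.0, 2.0)   # strictly positive right-hand side

xstar, newton_decrements = acent(A, b)
print("analytic center: %s" % list(xstar))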
def getupperprobability(self,subset): """Compute upper probability of an event expressed in binary code. :param subset: the event of interest (a 1xn vector containing 1 for elements in the event, 0 otherwise.) :param type: np.array :returns: upper probability value :rtype: float """ if subset.__class__.__name__!='ndarray': raise Exception('Expecting a numpy array as argument') if subset.size != self.nbDecision: raise Exception('Subset incompatible with the frame size') if not CredalSet.issubsetmask(subset): raise Exception('Array is not 1/0 elements') solution = self.solvelowerexpectation(-subset) if solution['status']!='optimal': return "NA" else: return dot(solution['x'],matrix(subset))
def solve_portfolio_with_cam_correlation(cam_mu, cam_sigma, cam_cor, alpha): n = len(cam_mu) cams = [] cam_idx = {} idx_cam = {} idx = 0 for cam in sorted(cam_mu): cams.append(cam) cam_idx[cam] = idx idx_cam[idx] = cam idx += 1 mu = matrix(0.0, (n, 1)) for cam in cams: mu[cam_idx[cam]] = cam_mu[cam] Sigma = matrix(0.0, (n, n)) for cam1 in cams: idx1 = cam_idx[cam1] for cam2 in cams: idx2 = cam_idx[cam2] idx = idx1 * n + idx2 cor = 0.0 if (cam1, cam2) in cam_cor: cor = cam_cor[(cam1, cam2)] elif (cam2, cam1) in cam_cor: cor = cam_cor[(cam2, cam1)] Sigma[idx] = cor * cam_sigma[cam1] * cam_sigma[cam2] v = solve_portfolio(mu, Sigma, alpha) cam_v = {} for cam in cams: cam_v[cam] = v[cam_idx[cam]] config.cached_mu = mu config.cached_Sigma = Sigma config.cached_idx_cam = idx_cam.copy() config.cached_portfolio_sigma = sqrt(dot(v, Sigma * v)) config.cached_cam_v = cam_v.copy() return cam_v
def solve(x, y, z): # Solve # # [ H A' GG'*W^{-1} ] [ ux ] [ bx ] # [ A 0 0 ] * [ uy [ = [ by ] # [ W^{-T}*GG 0 -I ] [ W*uz ] [ W^{-T}*bz ] # # and return ux, uy, W*uz. # # On entry, x, y, z contain bx, by, bz. On exit, they contain # the solution ux, uy, W*uz. blas.scal(0.0, sltn) blas.copy(x, u) blas.copy(y, u, offsety = n) scale(z, W, trans = 'T', inverse = 'I') pack(z, u, dims, mnl, offsety = n + p) blas.copy(u, r) # Iterative refinement algorithm: # Init: sltn = 0, r_0 = [bx; by; W^{-T}*bz] # 1. u_k = Ktilde^-1 * r_k # 2. sltn += u_k # 3. r_k+1 = r - K*sltn # Repeat until exceed MAX_ITER iterations or ||r|| <= ERROR_BOUND iteration = 0 resid_norm = 1 while iteration <= MAX_ITER and resid_norm > ERROR_BOUND: lapack.sytrs(Ktilde, ipiv, u) blas.axpy(u, sltn, alpha = 1.0) blas.copy(r, u) blas.symv(K, sltn, u, alpha = -1.0, beta = 1.0) resid_norm = math.sqrt(blas.dot(u, u)) iteration += 1 blas.copy(sltn, x, n = n) blas.copy(sltn, y, offsetx = n, n = p) unpack(sltn, z, dims, mnl, offsetx = n + p)
def markowitzReturns( returns, tickers, explain = False): n = len(returns) returns_df = returns returns = np.asmatrix(returns) mus = [10**(5.0 * t/50 - 1.0) for t in range(50)] # Convert to cvxopt matrices Sigma = opt.matrix(np.cov(returns)) q = opt.matrix(np.mean(returns, axis=1)) # Create constraint matrices G = -opt.matrix(np.eye(n)) # negative n x n identity matrix h = opt.matrix(0.0, (n ,1)) # -I*w < 0 i.e. no shorts A = opt.matrix(1.0, (1, n)) # A is all ones so A*w = w b = opt.matrix(1.0) # Dot product sums to 1 # Calculate efficient frontier weights using quadratic programming ports = [solvers.qp(mu*Sigma, -q, G, h, A, b)['x'] for mu in mus] # Calculate risks and returns of frontier returns = [blas.dot(q, x) for x in ports] risks = [np.sqrt(blas.dot(x, Sigma*x)) for x in ports] # Fit polynomial to frontier curve m = np.polyfit(returns, risks, 2) x = np.sqrt(m[2]/m[0]) # Calculate optimal portfolio weights optimal_weights = solvers.qp(opt.matrix(x * Sigma), -q, G, h, A, b)['x'] optimal_return = blas.dot(q, optimal_weights) optimal_risk = np.sqrt(blas.dot(optimal_weights, Sigma*optimal_weights)) # Method to justify this portfolio distribution if asked for if( explain ): date_text = """ -------------------------------------------------------------------------------------------------- Using returns data from {0} to {1} a careful mean - variance analysis was performed. The analysis found a number of portfolios lying on the markowitz efficient frontier and they are found below. The analysis indicates that the optimal portfolio for the next trading day will have the following distribution: """ print date_text.format(returns_df.columns[0],returns_df.columns[len(returns_df.columns)-1]) # Print optimal weights weights = np.asarray(optimal_weights) weights = [float(weights[i]) for i in range(len(weights))] wts = dict(zip(tickers,weights)) for k in wts: weight_text = "\t{0} : {1:.4f}%" print weight_text.format(str(k),float(wts[k])*100) returns_text = """ This portfolio distribution has an expected return of: {0:.4f}%""" print returns_text.format(float(optimal_return)*100) risk_text = """ And the associated risk (standard deviation) is: {0:.4f}""" print risk_text.format(float(optimal_risk)) break_text=""" -------------------------------------------------------------------------------------------------- """ print break_text plt.plot(risks, returns, 'b-o') plt.title('Efficient Portfolios on {}'.format(returns_df.columns[len(returns_df.columns)-1])) plt.ylabel('Returns (%)') plt.xlabel('Risk (STD)') plt.show() return np.asarray(optimal_weights), returns, risks
def test_dot(self): A = cp.cspmatrix(self.symb) + self.A B = cp.cspmatrix(self.symb) - 2*self.A self.assertAlmostEqual(2.0*blas.dot(A.blkval, B.blkval) - blas.dot(A.diag(),B.diag()), cp.dot(A,B))
def xdot(u,v): return blas.dot(u[0], v[0]) + misc.sdot2(u[1], v[1]) + \ misc.sdot2(u[2], v[2])
n = 4 S = matrix( [[ 4e-2, 6e-3, -4e-3, 0.0 ], [ 6e-3, 1e-2, 0.0, 0.0 ], [-4e-3, 0.0, 2.5e-3, 0.0 ], [ 0.0, 0.0, 0.0, 0.0 ]] ) pbar = matrix([.12, .10, .07, .03]) G = matrix(0.0, (n,n)) G[::n+1] = -1.0 h = matrix(0.0, (n,1)) A = matrix(1.0, (1,n)) b = matrix(1.0) N = 100 mus = [ 10**(5.0*t/N-1.0) for t in range(N) ] options['show_progress'] = False xs = [ qp(mu*S, -pbar, G, h, A, b)['x'] for mu in mus ] returns = [ dot(pbar,x) for x in xs ] risks = [ sqrt(dot(x, S*x)) for x in xs ] print returns print risks import pylab c1 = [ x[0] for x in xs ] c2 = [ x[0] + x[1] for x in xs ] c3 = [ x[0] + x[1] + x[2] for x in xs ] c4 = [ x[0] + x[1] + x[2] + x[3] for x in xs ]
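# The script above stops right after building the cumulative allocations
# c1..c4. A hedged way to finish it is to plot those allocations against
# risk; this is one plausible completion, not necessarily the original
# example's exact figure code.
pylab.figure(2, facecolor='w')
pylab.plot(risks, c1, risks, c2, risks, c3, risks, c4)
pylab.xlabel('standard deviation')
pylab.ylabel('cumulative allocation')
pylab.title('Optimal allocations x1, x1+x2, x1+x2+x3, x1+x2+x3+x4')
pylab.axis([0.0, 0.2, 0.0, 1.0])
pylab.show()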
# with n variables, and matrices A(x), B of size p x q. setseed(0) p, q, n = 100, 100, 100 A = normal(p*q, n) B = normal(p, q) # options['feastol'] = 1e-6 # options['refinement'] = 3 sol = nucnrm.nrmapp(A, B) x = sol['x'] Z = sol['Z'] s = matrix(0.0, (p,1)) X = matrix(A *x, (p, q)) + B lapack.gesvd(+X, s) nrmX = sum(s) lapack.gesvd(+Z, s) nrmZ = max(s) res = matrix(0.0, (n, 1)) blas.gemv(A, Z, res, beta = 1.0, trans = 'T') print "\nNuclear norm of A(x) + B: %e" %nrmX print "Inner product of B and Z: %e" %blas.dot(B, Z) print "Maximum singular value of Z: %e" %nrmZ print "Euclidean norm of A'(Z): %e" %blas.nrm2(res)
# Two variables (t, u). G = matrix(0.0, ((n+1)**2, 2)) G[-1, 0] = -1.0 # coefficient of t G[: (n+1)**2-1 : n+2, 1] = -1.0 # coefficient of u h = matrix( [ [ A.T * A, b.T * A ], [ A.T * b, b.T * b ] ] ) c = matrix(1.0, (2,1)) nopts = 40 alpha1 = [2.0/(nopts//2-1) * alpha for alpha in range(nopts//2) ] + \ [ 2.0 + (15.0 - 2.0)/(nopts//2) * alpha for alpha in range(1,nopts//2+1) ] lbnds = [ blas.nrm2(b)**2 ] for alpha in alpha1[1:]: c[1:] = alpha lbnds += [ -blas.dot(c, solvers.sdp(c, Gs=[G], hs=[h])['x']) ] nopts = 10 alpha2 = [ 1.0/(nopts-1) * alpha for alpha in range(nopts) ] ubnds = [ blas.nrm2(b)**2 ] for alpha in alpha2[1:]: c[1:] = alpha ubnds += [ blas.dot(c, solvers.sdp(c, Gs=[G], hs=[-h])['x']) ] try: import pylab except ImportError: pass else: pylab.figure(1, facecolor='w') pylab.plot(lbnds, alpha1, 'b-', ubnds, alpha2, 'b-') kmax = max([ k for k in range(len(alpha1)) if alpha1[k] < blas.nrm2(xls)**2 ])