def test_dcp_curvature(self):
    """Verify DCP curvature classification for a range of compositions."""
    # Affine shift of a convex atom stays convex.
    self.assertEqual((1 + cvx.exp(cvx.Variable())).curvature, s.CONVEX)
    # Parameter times an affine expression is affine.
    self.assertEqual((cvx.Parameter() * cvx.NonNegative()).curvature, s.AFFINE)
    # Any expression built from a constant is constant.
    poly = lambda t: t ** 2 + t ** 0.5
    self.assertEqual(poly(cvx.Constant(2)).curvature, s.CONSTANT)
    # Increasing convex composition of a convex atom.
    self.assertEqual((cvx.exp(cvx.Variable()) ** 2).curvature, s.CONVEX)
    # Constant minus a concave atom is convex.
    self.assertEqual((1 - cvx.sqrt(cvx.Variable())).curvature, s.CONVEX)
    # Increasing concave composition of a concave atom.
    self.assertEqual(cvx.log(cvx.sqrt(cvx.Variable())).curvature, s.CONCAVE)
    # Negation of a convex expression is concave.
    self.assertEqual((-(cvx.exp(cvx.Variable())) ** 2).curvature, s.CONCAVE)
    # log(exp(x)) violates the DCP ruleset even though it equals x.
    self.assertEqual(cvx.log(cvx.exp(cvx.Variable())).is_dcp(), False)
    self.assertEqual(cvx.entr(cvx.NonNegative()).curvature, s.CONCAVE)
    # Raising to the power 0 collapses the expression to a constant.
    self.assertEqual((((cvx.Variable() ** 2) ** 0.5) ** 0).curvature, s.CONSTANT)
def sharpe_ratio(w, expected_returns, cov_matrix, risk_free_rate=0.02, negative=True):
    """
    Calculate the (negative) Sharpe ratio of a portfolio

    :param w: asset weights in the portfolio
    :type w: np.ndarray OR cp.Variable
    :param expected_returns: expected return of each asset
    :type expected_returns: np.ndarray
    :param cov_matrix: covariance matrix
    :type cov_matrix: np.ndarray
    :param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02.
                           The period of the risk-free rate should correspond to the
                           frequency of expected returns.
    :type risk_free_rate: float, optional
    :param negative: whether quantity should be made negative (so we can minimise)
    :type negative: boolean
    :return: (negative) Sharpe ratio
    :rtype: float
    """
    portfolio_return = w @ expected_returns
    portfolio_stdev = cp.sqrt(cp.quad_form(w, cov_matrix))
    # Negate when the caller wants a quantity suitable for minimisation.
    multiplier = -1 if negative else 1
    ratio = (portfolio_return - risk_free_rate) / portfolio_stdev
    return _objective_value(w, multiplier * ratio)
def mpt_opt(data, gamma_vec):
    """Solve the risk-aversion form of mean-variance optimisation for every
    gamma in ``gamma_vec``.

    :param data: per-asset return series (rows are assets)
    :param gamma_vec: iterable of risk-aversion values to sweep
    :return: tuple (weights, returns, risks), one entry per gamma
    """
    n_samples = len(gamma_vec)
    weights_out = [None] * n_samples
    returns_out = np.zeros(n_samples)
    risks_out = np.zeros(n_samples)

    n_assets = len(data)
    weights = Variable(n_assets)
    means = np.array([np.mean(data[i]) for i in range(n_assets)])
    covariance = np.cov(data)

    # One parametrised problem, re-solved per risk-aversion level.
    gamma = Parameter(nonneg=True)
    expected_return = means.T * weights
    variance = quad_form(weights, covariance)  # w^T Sigma w
    problem = Problem(Maximize(expected_return - gamma * variance),
                      [sum(weights) == 1, weights >= 0])

    for i, new_gamma in enumerate(gamma_vec):
        gamma.value = new_gamma
        problem.solve()
        weights_out[i] = weights.value
        returns_out[i] = expected_return.value
        risks_out[i] = sqrt(variance).value
    return (weights_out, returns_out, risks_out)
def test_max(self):
    """Minimising the max of a quasiconvex ratio is DQCP-solvable."""
    x = cp.Variable(2, pos=True)
    ratio = (1 - 2 * cp.sqrt(x) + x) / x
    problem = cp.Problem(cp.Minimize(cp.max(ratio)),
                         [x[0] <= 0.5, x[1] <= 0.9])
    self.assertTrue(problem.is_dqcp())
    problem.solve(SOLVER, qcp=True)
    self.assertAlmostEqual(problem.objective.value, 0.1715, places=3)
def test_tutorial_example(self):
    """Smoke test: the DQCP tutorial problem solves without error."""
    x = cp.Variable()
    y = cp.Variable(pos=True)
    ratio = -cp.sqrt(x) / y
    problem = cp.Problem(cp.Minimize(ratio), [cp.exp(x) <= y])
    problem.solve(SOLVER, qcp=True)
def test_power(self) -> None:
    """Test grad for power.
    """
    # sqrt: d/dx x**0.5 = 0.5/sqrt(x); None outside the domain (x < 0).
    expr = cp.sqrt(self.a)
    self.a.value = 2
    self.assertAlmostEqual(expr.grad[self.a], 0.5 / np.sqrt(2))
    self.a.value = 3
    self.assertAlmostEqual(expr.grad[self.a], 0.5 / np.sqrt(3))
    self.a.value = -1
    self.assertAlmostEqual(expr.grad[self.a], None)

    # Cube of a vector: 3*x**2 on the Jacobian diagonal.
    expr = self.x ** 3
    self.x.value = [3, 4]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(),
                                np.array([[27, 0], [0, 48]]))
    expr = self.x ** 3
    self.x.value = [-1e-9, 4]
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(),
                                np.array([[0, 0], [0, 48]]))

    # Square of a matrix variable: 2*x entries on the diagonal.
    expr = self.A ** 2
    self.A.value = [[1, -2], [3, 4]]
    val = np.zeros((4, 4)) + np.diag([2, -4, 6, 8])
    self.assertItemsAlmostEqual(expr.grad[self.A].toarray(), val)

    # Constant: x**0 has an identically zero gradient.
    expr = self.a ** 0
    self.assertAlmostEqual(expr.grad[self.a], 0)
    expr = self.x ** 0
    self.assertItemsAlmostEqual(expr.grad[self.x].toarray(), np.zeros((2, 2)))
def test_convexify_obj(self):
    """Test convexify objective.
    """
    # Non-convex Maximize of a sum of squares, linearised about x = [1, 1].
    objective = cvx.Maximize(cvx.sum(cvx.square(self.x)))
    self.x.value = [1, 1]
    linearized = convexify_obj(objective)
    problem = cvx.Problem(linearized, [self.x <= -1])
    problem.solve()
    self.assertAlmostEqual(problem.value, -6)

    # Concave sqrt under Minimize, linearised about a = 1.
    objective = cvx.Minimize(cvx.sqrt(self.a))
    self.a.value = [1]
    linearized = convexify_obj(objective)
    problem = cvx.Problem(linearized, cvx.sqrt(self.a).domain)
    problem.solve()
    self.assertAlmostEqual(problem.value, 0.5)
def sharpe_ratio(w, exp_ret, cov, risk_free_rate=0.02, neg=True):
    """Return the (optionally negated) Sharpe ratio of a portfolio.

    :param w: asset weights (np.ndarray or cp.Variable)
    :param exp_ret: expected return of each asset
    :param cov: covariance matrix of asset returns
    :param risk_free_rate: risk-free rate; its period should match the
        frequency of the expected returns, defaults to 0.02
    :param neg: if True, negate the ratio so it can be minimised
    :return: (negative) Sharpe ratio
    """
    mu = w @ exp_ret
    sigma = cp.sqrt(cp.quad_form(w, cov))
    # Single expression instead of two duplicated if/else branches.
    sign = -1 if neg else 1
    return sign * (mu - risk_free_rate) / sigma
def test_convexify_obj(self):
    """Test convexify objective.
    """
    obj = cvx.Maximize(cvx.sum(cvx.square(self.x)))
    self.x.value = [1, 1]
    convexified = convexify_obj(obj)
    prob = cvx.Problem(convexified, [self.x <= -1])
    prob.solve()
    # Linearising x**2 at 1 gives 2x - 1 per entry: 2*(-1) - 1 = -3 each.
    self.assertAlmostEqual(prob.value, -6)

    obj = cvx.Minimize(cvx.sqrt(self.a))
    self.a.value = [1]
    convexified = convexify_obj(obj)
    prob = cvx.Problem(convexified, cvx.sqrt(self.a).domain)
    prob.solve()
    self.assertAlmostEqual(prob.value, 0.5)
def agl(self, x, y, group_index, param):
    """
    Group lasso penalized solver.

    :param x: design matrix, shape (n_samples, n_features)
    :param y: response vector
    :param group_index: group id of each column of ``x``
    :param param: iterable of (lam, gl) pairs; ``lam * gl`` feeds the
        per-group weight parameter for each re-solve
    :return: list of solution vectors, one per (lam, gl) pair
    """
    n = x.shape[0]
    # Check the group_index, find the unique groups, count how many vars
    # are in each group (this is the group size).
    unique_group_index = np.unique(group_index)
    group_sizes, beta_var = self._num_beta_var_from_group_index(group_index)
    num_groups = len(group_sizes)
    model_prediction = 0
    group_lasso_penalization = 0
    # If the model has an intercept, we calculate the value of the model for
    # the intercept group_index.  We start the penalization in inf_lim so if
    # the model has an intercept, penalization starts after the intercept.
    inf_lim = 0
    if self.intercept:
        # Adds an element (referring to the intercept) to group_index,
        # group_sizes, num_groups.
        group_index = np.append(0, group_index)
        unique_group_index = np.unique(group_index)
        x = np.c_[np.ones(n), x]
        group_sizes = [1] + group_sizes
        beta_var = [cvxpy.Variable(1)] + beta_var
        num_groups = num_groups + 1
        # Compute model prediction for the intercept with no penalization.
        model_prediction = x[:, np.where(group_index == unique_group_index[0])[0]] @ beta_var[0]
        inf_lim = 1
    gl_weights_param = cvxpy.Parameter(num_groups, nonneg=True)
    for i in range(inf_lim, num_groups):
        model_prediction += x[:, np.where(group_index == unique_group_index[i])[0]] @ beta_var[i]
        group_lasso_penalization += cvxpy.sqrt(group_sizes[i]) * gl_weights_param[i] * cvxpy.norm(beta_var[i], 2)
    if self.model == 'lm':
        objective_function = (1.0 / n) * cvxpy.sum_squares(y - model_prediction)
    else:
        objective_function = (1.0 / n) * cvxpy.sum(self._quantile_function(x=(y - model_prediction)))
    objective = cvxpy.Minimize(objective_function + group_lasso_penalization)
    problem = cvxpy.Problem(objective)
    beta_sol_list = []
    # Solve the problem iteratively for each parameter value.
    for lam, gl in param:
        gl_weights_param.value = lam * gl
        # Solve the problem.  Try first the default CVXPY option, which is
        # usually optimal for the problem.  If a ValueError arises, try the
        # solvers provided as input to the method.
        # (This comment was previously split mid-sentence, leaving stray
        # tokens in the source.)
        try:
            problem.solve(warm_start=True)
        except (ValueError, cvxpy.error.SolverError):
            for elt in self.solver:
                solver_dict = self._cvxpy_solver_options(solver=elt)
                try:
                    problem.solve(**solver_dict)
                    if 'optimal' in problem.status:
                        break
                except (ValueError, cvxpy.error.SolverError):
                    continue
        if problem.status in ["infeasible", "unbounded"]:
            logging.warning('Optimization problem status failure')
        beta_sol = np.concatenate([b.value for b in beta_var], axis=0)
        # Hard-threshold tiny coefficients to exact zero.
        beta_sol[np.abs(beta_sol) < self.tol] = 0
        beta_sol_list.append(beta_sol)
    return beta_sol_list
def min_vol(self):
    """Optimise to find the Minimum Volatility Portfolio."""
    # Build risk/return expressions and the standing constraint set.
    self._set_cvx_risk_rtn_params()
    # Minimise portfolio variance subject to the standing constraints.
    cvx.Problem(cvx.Minimize(self.risk), self.constraints).solve()
    return self.w.value.round(4), cvx.sqrt(self.risk).value, self.rtn.value
def solve_hd(train_distrib, test_distrib, n_classes, solver='ECOS'):
    """Estimate class prevalences by minimising one minus the Bhattacharyya
    coefficient between the test distribution and a mixture of the training
    class distributions."""
    prevalences = cvxpy.Variable(n_classes)
    mixture = train_distrib * prevalences
    overlap = cvxpy.multiply(np.squeeze(test_distrib), mixture)
    objective = cvxpy.Minimize(1 - cvxpy.sum(cvxpy.sqrt(overlap)))
    # Prevalences form a probability simplex.
    constraints = [cvxpy.sum(prevalences) == 1, prevalences >= 0]
    problem = cvxpy.Problem(objective, constraints)
    problem.solve(solver=solver)
    return np.array(prevalences.value).squeeze()
def solve_hd(train_dist, test_dist, n_classes, solver="ECOS"):
    """Estimate class prevalences by minimising the Hellinger-distance style
    objective between the test distribution and a mixture of the training
    class distributions.

    :param train_dist: per-class training distributions
    :param test_dist: observed test distribution
    :param n_classes: number of classes / mixture components
    :param solver: CVXPY solver name, defaults to "ECOS"
    :return: prevalence vector as a flat numpy array
    """
    p = cvxpy.Variable(n_classes)
    s = cvxpy.mul_elemwise(test_dist, (train_dist.T * p))
    objective = cvxpy.Minimize(1 - cvxpy.sum_entries(cvxpy.sqrt(s)))
    # Fixed misspelled local name ("contraints" -> "constraints").
    constraints = [cvxpy.sum_entries(p) == 1, p >= 0]
    prob = cvxpy.Problem(objective, constraints)
    prob.solve(solver=solver)
    return np.array(p.value).squeeze()
def constr(self, x, phi, log_cash):
    """Build per-group feasibility constraints (or violations).

    Returns one entry per group ``g``: a cvxpy constraint when the
    expression is variable, or the negated value (the violation) when it
    is constant.
    """
    expr1 = cvx.log(sum(a * cvx.sqrt(x[g]) for g, a in self.a.iteritems()))

    def to_constant(expr):
        """return violation if constant, constraint if variable

        Parameter renamed from ``x`` so it no longer shadows the outer
        ``x`` argument.
        """
        return -expr.value if expr.is_constant() else expr >= 0

    return [to_constant(expr1 - np.log(a) + (1 - self.rho) * cvx.log(x[g]) + phi[g] - log_cash)
            for g, a in self.a.iteritems()]
def Get_Efficient_Frontier_CP(self): self.data() #self.portfolio_metrics() self.avg_returns = self.avg_returns.values self.cov_mat = self.cov_mat.values #optimization problem self.weights = cp.Variable(self.n_assets) self.gamma = cp.Parameter(nonneg=True) self.portf_rtn_cvx = self.avg_returns @ self.weights self.portf_vol_cvx = cp.quad_form(self.weights, self.cov_mat) self.objective_function = cp.Maximize(self.portf_rtn_cvx - self.gamma * \ self.portf_vol_cvx) self.problem = cp.Problem( self.objective_function, [cp.sum(self.weights) == 1, self.weights >= 0]) #Calculate Efficient Frontier self.n_points = 25 self.portf_rtn_cvx_ef = np.zeros(self.n_points) self.portf_vol_cvx_ef = np.zeros(self.n_points) self.weights_ef = [] self.gamma_range = np.logspace(-3, 3, num=self.n_points) for i in range(self.n_points): self.gamma.value = self.gamma_range[i] self.problem.solve() self.portf_vol_cvx_ef[i] = cp.sqrt(self.portf_vol_cvx).value self.portf_rtn_cvx_ef[i] = self.portf_rtn_cvx.value self.weights_ef.append(self.weights.value) #plotting risk averseness self.weights_df = pd.DataFrame(self.weights_ef, columns=self.risky_assets, index=np.round(self.gamma_range, 3)) ax = self.weights_df.plot(kind='bar', stacked=True) ax.set(title='Weights allocation per risk-aversion level', xlabel=r'$\gamma$', ylabel='weight') ax.legend(bbox_to_anchor=(1, 1)) #plotting efficient frontier fig, ax = plt.subplots() ax.plot(self.portf_vol_cvx_ef, self.portf_rtn_cvx_ef, 'g--') for asset_index in range(self.n_assets): plt.scatter(x=np.sqrt(self.cov_mat[asset_index, asset_index]), y=self.avg_returns[asset_index], marker=self.marks[asset_index], label=self.risky_assets[asset_index], s=150) ax.set(title='Efficient Frontier', xlabel='Volatility', ylabel='Expected Returns') ax.legend() plt.show()
def efficeint_frontier_solver(data, sample=500):
    """Trace the efficient frontier to the right of its minimum-variance
    vertex.

    NOTE: the misspelled function name is kept for backward compatibility
    with existing callers.  Written against the pre-1.0 cvxpy API
    (``sum_entries``, ``Parameter(sign=...)``).

    :param data: DataFrame of asset returns, one column per asset
    :param sample: number of target-return samples along the frontier
    :return: (risk_data, ret_data, weight_data) lists, one entry per point
    """
    n = len(data.columns)
    w = cvx.Variable(n)
    return_vec = data.values.T
    mu = np.asmatrix(data.mean()).T
    ret = mu.T * w
    C = np.asmatrix(np.cov(return_vec))
    risk = cvx.quad_form(w, C)
    # First find the frontier vertex: the global minimum-variance portfolio.
    prob0 = cvx.Problem(cvx.Minimize(risk),
                        [cvx.sum_entries(w) == 1, w >= 0, ])
    prob0.solve()
    mu_min = ret.value
    risk_data = []
    ret_data = []
    weight_data = []
    # Then walk rightwards along the frontier by raising the target return.
    delta = cvx.Parameter(sign='positive')
    prob = cvx.Problem(cvx.Minimize(risk),
                       [cvx.sum_entries(w) == 1, w >= 0,
                        ret == mu_min + delta])
    for i in np.linspace(0, 5, sample):
        delta.value = i
        prob.solve()
        risk_min = cvx.sqrt(risk).value
        # Stop once the target return becomes infeasible.
        if risk_min == float('inf') or risk_min is None:
            break
        # Reuse the computed value instead of re-evaluating sqrt(risk).
        risk_data.append(risk_min)
        ret_data.append(ret.value)
        weight_data.append(w.value)
    return risk_data, ret_data, weight_data
def test_concave_frac(self):
    """sqrt(x)/exp(x) is quasiconcave (but not quasiconvex) and maximisable."""
    x = cp.Variable(nonneg=True)
    frac = cp.sqrt(x) / cp.exp(x)
    self.assertTrue(frac.is_dqcp())
    self.assertTrue(frac.is_quasiconcave())
    self.assertFalse(frac.is_quasiconvex())

    problem = cp.Problem(cp.Maximize(frac))
    self.assertTrue(problem.is_dqcp())
    problem.solve(SOLVER, qcp=True)
    self.assertAlmostEqual(problem.objective.value, 0.428, places=1)
    self.assertAlmostEqual(x.value, 0.5, places=1)
def risk_aversion(self, gamma=0):
    """ Optimisation using a Risk-Aversion parameter

        Solve: Maximise[Return - Risk] = [w * mu - (gamma/2)* w * vcv * w.T]
    """
    self._set_cvx_risk_rtn_params()    # setup risk & rtn formula
    self.gamma.value = gamma
    # NOTE(review): the objective below uses the raw ``gamma`` float, not
    # self.gamma — the parameter assignment above appears unused; confirm
    # whether the objective was meant to reference self.gamma.
    # setup CVXPY problem and solve for max rtn given risk aversion level
    objective = cvx.Maximize(self.rtn - (gamma / 2) * self.risk)
    cvx.Problem(objective, self.constraints).solve()    # Solve & Save Output
    return self.w.value.round(4), cvx.sqrt(self.risk).value, self.rtn.value
def plot_portfolios(rets, sols):
    """Scatter each solution's (std, mean) point plus every single-asset
    point on a risk/return plot.

    :param rets: DataFrame of asset returns
    :param sols: iterable of dicts with 'sd' and 'mean' entries
    """
    for sol in sols:
        plt.plot(sol['sd'], sol['mean'], 'bs')
    # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
    p = rets.mean().values
    covs = rets.cov().values
    for i in range(len(p)):
        plt.plot(cvxpy.sqrt(covs[i, i]).value, p[i], 'ro')
    plt.xlabel('Standard deviation')
    plt.ylabel('Return')
    plt.show()
def l1_optimize_with_noise(A, B, noise, Phi, verbose=False):
    """Basis-pursuit denoising: minimise ||x||_1 subject to a residual
    bound and pixel-range constraints on Phi @ x.

    :param A: measurement matrix
    :param B: observed measurements
    :param noise: noise level; the residual bound is 4 * noise * sqrt(n)
    :param Phi: transform mapping x back to pixel space (clamped to [0, 255])
    :param verbose: forwarded to the solver
    :return: the optimal x as a numpy array (None if the solve fails)
    """
    n = A.shape[1]
    x = cp.Variable(shape=(n, 1))
    objective = cp.Minimize(cp.norm(x, 1))
    constraints = [
        cp.norm(cp.matmul(A, x) - B) <= 4 * noise * cp.sqrt(n),
        cp.matmul(Phi, x) >= 0,
        cp.matmul(Phi, x) <= 255,
    ]
    prob = cp.Problem(objective, constraints)
    # The objective value was bound to an unused local; only the optimal
    # point is returned.
    prob.solve(verbose=verbose)
    return x.value
def learn_with_similarity_label(self, data, label, mode, **kwargs):
    """
    Implement the metric learning algorithm in "Distance metric learning,
    with application to clustering with side-information" by Eric P. Xing,
    et al.  The alg learns distance metrics from similarity labels of data
    pairs.

    :param data: sequence of (point_a, point_b) pairs
    :param label: 1 for a similar pair, 0 for a dissimilar pair
    :param mode: "diag" learns a diagonal metric; "full" learns a full
        PSD matrix
    :return: (status, optimal value, learned matrix), or
        (status, nan, nan) when the solve is not (near-)optimal

    NOTE(review): written against the pre-1.0 cvxpy API
    (Variable(rows=, cols=), Semidef, bare SCS/MOSEK solver names).
    """
    n_feature = data[0][0].shape[0]
    # Split pair indices into similar (label 1) and dissimilar (label 0).
    index_s = [i for i, x in enumerate(label) if x == 1]
    index_ns = [i for i, x in enumerate(label) if x == 0]
    # Difference vectors for each pair.
    X_s = [list(data[i][0] - data[i][1]) for i in index_s]
    X_ns = [list(data[i][0] - data[i][1]) for i in index_ns]
    # mode tells if the learned Mahalanobis distance metrics are specified
    # by a diagonal matrix or a fully-parametrized matrix
    if mode == "diag":
        x = cvx.Variable(rows=n_feature, cols=1)
        # Shrink distances of similar pairs...
        obj = 0
        for i in range(len(X_s)):
            obj = obj + sum_entries(mul_elemwise(np.square(X_s[i]), x))
        # ...while keeping dissimilar pairs spread out (via log of summed
        # root distances).
        obj_neg = 0
        for i in range(len(X_ns)):
            obj_neg = obj_neg + sqrt(
                sum_entries(mul_elemwise(np.square(X_ns[i]), x)))
        obj = obj - cvx.log(obj_neg)
        constraints = [x >= 0]
        obj_cvx = cvx.Minimize(obj)
        prob = cvx.Problem(obj_cvx, constraints)
        prob.solve(solver=SCS)
        # The learned metric is diag(x).
        x_mat = np.diag(x.value.transpose().tolist()[0])
        if prob.status == 'optimal' or prob.status == 'optimal_inaccurate':
            return prob.status, prob.value, x_mat
        else:
            return prob.status, np.nan, np.nan
    if mode == "full":
        lam = 1  # weight of the L1 regulariser on A
        A = cvx.Semidef(n_feature)
        obj = 0
        for i in range(len(X_s)):
            obj = obj + cvx.quad_form(X_s[i], A)
        obj += lam * norm(A, 1)
        # Keep dissimilar pairs at least unit total distance apart.
        const = 0
        for i in range(len(X_ns)):
            const = const + cvx.sqrt(cvx.quad_form(X_ns[i], A))
        constraints = [const >= 1]
        obj_cvx = cvx.Minimize(obj)
        prob = cvx.Problem(obj_cvx, constraints)
        prob.solve(solver=MOSEK)
        if prob.status == 'optimal' or prob.status == 'optimal_inaccurate':
            return prob.status, prob.value, A.value
        else:
            return prob.status, np.nan, np.nan
def run_opt(self):
    """Worst-case portfolio risk over covariance matrices within an
    elementwise +/-0.2 band of the input covariance (diagonal fixed)."""
    sigma_opt = cp.Variable((self.n, self.n), PSD=True)  # candidate covariance
    delta = cp.Variable((self.n, self.n), symmetric=True)  # perturbation
    risk = cp.quad_form(self.w, sigma_opt)
    constraints_ls = [
        sigma_opt == self.sigma + delta,  # candidate = input + perturbation
        cp.diag(delta) == 0,              # variances are held fixed
        cp.abs(delta) <= 0.2,             # bounded elementwise uncertainty
    ]
    cp.Problem(cp.Maximize(risk), constraints_ls).solve()
    return {
        'actual_std': cp.sqrt(cp.quad_form(self.w, self.sigma)).value,
        'worst_case_std': cp.sqrt(risk).value,
        'delta': delta.value,
    }
def f():
    """Demonstrate that rewriting a non-DCP ratio constraint through
    quad_over_lin makes the problem DCP, then solve the rewritten form."""
    x = cp.Variable()
    y = cp.Variable()
    problem = cp.Problem(cp.Minimize(x),
                         [(x + y) ** 2 / cp.sqrt(y) <= x - y + 5])
    print(f"f before: {problem.is_dcp()}")

    # Equivalent DCP formulation via auxiliary variables a = x + y and
    # b <= sqrt(y).
    x = cp.Variable()
    y = cp.Variable()
    a = cp.Variable()
    b = cp.Variable()
    problem = cp.Problem(cp.Minimize(x), [
        cp.quad_over_lin(a, b) <= x - y + 5,
        a == x + y,
        b <= cp.sqrt(y),
    ])
    print(f"f after: {problem.is_dcp()}")
    problem.solve()
def mean_variance_builder(
        er: np.ndarray,
        risk_model: Dict[str, Union[None, np.ndarray]],
        bm: np.ndarray,
        lbound: Union[np.ndarray, float],
        ubound: Union[np.ndarray, float],
        risk_exposure: Optional[np.ndarray],
        risk_target: Optional[Tuple[np.ndarray, np.ndarray]],
        lam: float = 1.,
        linear_solver: str = 'ma27') -> Tuple[str, float, np.ndarray]:
    """Build and solve a mean-variance portfolio relative to benchmark ``bm``.

    A fully-unconstrained problem takes a fast CVXPY/ECOS path; otherwise
    it falls back to the QP optimizer with the full bound and linear
    constraint set.

    :return: tuple of (status, objective value, weights incl. benchmark)
    :raises PortfolioBuilderException: if the CVXPY solve is not
        (near-)optimal
    """
    lbound, ubound, cons_mat, clbound, cubound = _create_bounds(
        lbound, ubound, bm, risk_exposure, risk_target)
    if np.all(lbound == -np.inf) and np.all(
            ubound == np.inf) and cons_mat is None:
        # using fast path cvxpy
        n = len(er)
        w = cvxpy.Variable(n)
        cov = risk_model['cov']
        special_risk = risk_model['idsync']
        risk_cov = risk_model['factor_cov']
        risk_exposure = risk_model['factor_loading']
        if cov is None:
            # Factor-model risk: idiosyncratic term plus factor term.
            risk = cvxpy.sum_squares(cvxpy.multiply(cvxpy.sqrt(special_risk), w)) \
                + cvxpy.quad_form((w.T * risk_exposure).T, risk_cov)
        else:
            risk = cvxpy.quad_form(w, cov)
        # Standard mean-variance trade-off with risk-aversion lam.
        objective = cvxpy.Minimize(-w.T * er + 0.5 * lam * risk)
        prob = cvxpy.Problem(objective)
        prob.solve(solver='ECOS', feastol=1e-9, abstol=1e-9, reltol=1e-9)
        if prob.status == 'optimal' or prob.status == 'optimal_inaccurate':
            # Active weights are relative to the benchmark; add it back.
            return 'optimal', prob.value, np.array(w.value).flatten() + bm
        else:
            raise PortfolioBuilderException(prob.status)
    else:
        optimizer = QPOptimizer(er, risk_model['cov'], lbound, ubound,
                                cons_mat, clbound, cubound, lam,
                                risk_model['factor_cov'],
                                risk_model['factor_loading'],
                                risk_model['idsync'],
                                linear_solver=linear_solver)
        return _create_result(optimizer, bm)
def test_quad_over_lin(self):
    """Test quad_over_lin DCP curvature and shape validation."""
    self.assertEqual(
        cp.quad_over_lin(cp.square(self.x), self.a).curvature, s.CONVEX)
    self.assertEqual(
        cp.quad_over_lin(-cp.square(self.x), self.a).curvature, s.CONVEX)
    # A sqrt numerator has no recognised curvature, so the atom is not DCP.
    atom = cp.quad_over_lin(cp.sqrt(self.x), self.a)
    self.assertEqual(atom.curvature, s.UNKNOWN)
    assert not atom.is_dcp()
    # The denominator must be a scalar.
    with self.assertRaises(Exception) as cm:
        cp.quad_over_lin(self.x, self.x)
    self.assertEqual(str(cm.exception),
                     "The second argument to quad_over_lin must be a scalar.")
def target_vol(self, vol=0):
    """Maximise return subject to an optional volatility cap.

    With vol == 0 (the default) no cap is applied and the optimiser
    simply maximises expected return.
    """
    self._set_cvx_risk_rtn_params()    # setup risk & rtn formula
    # Copy so the standing constraint list is not mutated.
    constraints = self.constraints.copy()
    if vol > 0:
        # Variance form of the volatility target: w' Sigma w <= vol^2.
        constraints.append(self.risk <= (np.float64(vol) ** 2))
    cvx.Problem(cvx.Maximize(self.rtn), constraints).solve()
    return self.w.value.round(4), cvx.sqrt(self.risk).value, self.rtn.value
def long_trade_off_curve():
    """Plot the risk/return trade-off curve with two highlighted points and
    every individual asset marked.

    Relies on module-level mu, cov_mat, risk_data and ret_data.
    """
    n = len(mu)
    highlighted = [29, 40]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(risk_data, ret_data, 'g-')
    for marker in highlighted:
        plt.plot(risk_data[marker], ret_data[marker], 'bs')
    # One red dot per asset at (its volatility, its mean return).
    for i in range(n):
        plt.plot(cvx.sqrt(cov_mat[i, i]).value, mu[i], 'ro')
    plt.xlabel('Standard deviation')
    plt.ylabel('Return')
    plt.show()
def _get_f_error(self, idx):
    """Build the symbolic per-filter theoretical error as a function of
    the sample-count variable, for the coefficients at ``idx``.

    Returns (x_arg, f_error): the cvxpy variable and the error expression.
    """
    # Clamp negative coefficients to zero before using them as parameters.
    coeffs = self._coeffs[idx].cpu().numpy()
    coeffs[coeffs < 0.0] = 0.0

    num_var = coeffs.shape[0]
    x_arg = cp.Variable(num_var)
    alpha = cp.Parameter(num_var, nonneg=True)
    alpha.value = coeffs

    k_constant = 3
    scaled = alpha / k_constant
    # Two error terms stacked so an elementwise norm can combine them.
    stacked = cp.vstack([
        cp.multiply(cp.inv_pos(x_arg), scaled),
        cp.multiply(cp.inv_pos(cp.sqrt(x_arg)), cp.sqrt(6 * alpha / k_constant)),
    ])
    f_error = cp.norm(stacked, axis=0) + cp.multiply(cp.inv_pos(x_arg), scaled)
    f_error = 1 / 2 * f_error
    return x_arg, f_error
def primal(self, x_hat):
    """Solve the primal allocation problem capped by ``x_hat``.

    Returns (optimal value, allocation, duals of the x <= x_hat cap) when
    the solve is optimal; implicitly None otherwise.
    """
    x = cvx.Variable(self.no_of_item)
    a = self.utility_coeffs_linear
    # Linear utility plus a concave sqrt bonus scaled by 4/mu.
    utility = a.T * x + float(4) / self.mu * cvx.sum_entries(cvx.sqrt(x))
    cap = x <= x_hat
    prob = cvx.Problem(cvx.Maximize(utility), [x >= 0, x <= 1, cap])
    prob.solve()
    if (prob.status == 'optimal'):
        return prob.value, np.array(x.value).ravel(), np.array(cap.dual_value).ravel()
def test_cvxpy(): """ 测试cvxpy包的使用 :return: """ #计算(x-y)^2的最小值 # x=cvxpy.Variable() # y=cvxpy.Variable() # constraints=[x+y==1,x-y>=1] # obj1=cvxpy.Minimize(cvxpy.square(x-y)) # prob1=cvxpy.Problem(obj1,constraints) # prob1.solve() # print("非线性规划1") # print("status:", prob1.status) # print("optimal:", prob1.value) # print("optimal var:", x.value, y.value) #计算(x)^2的最小值 print("线性规划1") x = cvxpy.Variable(name='x') y=cvxpy.Variable(name='y') pow_exper=cvxpy.power(x,3) obj2=cvxpy.Minimize(pow_exper) constraints2=[x<=-2] constraints2.append(x>=0) prob2=cvxpy.Problem(obj2,constraints2) prob2.solve() print("status:",prob2.status) print("optimal:",prob2.value) print("optimal var:",x.value) #验证是否符合DCP 凸线性规划条件 print("cvxpy.Minimize(cvxpy.square(x))", cvxpy.Minimize(cvxpy.square(x)).is_dcp()) prob3=cvxpy.Problem(cvxpy.Minimize(cvxpy.power(x,3)),[x>=-1]) print("cvxpy.Minimize(cvxpy.power(x,3))",cvxpy.Minimize(cvxpy.power(x,3)).is_dcp()) print("prob3",prob3.is_dcp()) print("cvxpy.Minimize(cvxpy.sqrt(x))",cvxpy.Minimize(cvxpy.sqrt(x)).is_dcp()) print("cvxpy.Minimize(cvxpy.log(x))",cvxpy.Minimize(cvxpy.log(x)).is_dcp()) print("cvxpy.Minimize(x*y)",cvxpy.Minimize(x*y).is_dcp()) print("cvxpy.Minimize(cvxpy.log(x*y))",cvxpy.Minimize(cvxpy.log(x*y)).is_dcp()) print("cvxpy.Minimize(cvxpy.log(x)",cvxpy.Minimize(cvxpy.log(x)).is_dcp()) print("cvxpy.Maximize(cvxpy.log(x)",cvxpy.Maximize(cvxpy.log(x)).is_dcp()) pass
def test_convexify_constr(self):
    """Test convexify constraint.
    """
    # Non-convex norm(x) >= 1 is linearised about x = [1, 1].
    constraint = cvx.norm(self.x) >= 1
    self.x.value = [1, 1]
    convexified = convexify_constr(constraint)
    prob = cvx.Problem(cvx.Minimize(cvx.norm(self.x)), [convexified[0]])
    prob.solve()
    self.assertAlmostEqual(prob.value, 1)

    # Concave-side sqrt(a) <= 1 also produces a domain constraint.
    constraint = cvx.sqrt(self.a) <= 1
    self.a.value = [1]
    convexified = convexify_constr(constraint)
    prob = cvx.Problem(cvx.Minimize(self.a),
                       [convexified[0], convexified[1][0]])
    prob.solve()
    self.assertAlmostEqual(self.a.value[0], 0)
def OBJ(X, Y, n_, N, d_, P): U = cv.Parameter(n_, N) # *X.shape UUT = cv.Parameter(n_, n_) dd = cv.Parameter(N, sign="positive") # THIS AFFECTS CONVEXITY!!!!!! # DIDN'T KNOW UNTIL ACCIDENTALLY MADE dd A PARAMETER U.value = X - Y UUT.value = np.dot(X - Y, (X - Y).T) dd.value = d_ term1 = sum(d_**2) # term2 = sum([dd[i]*cv.sqrt(cv.quad_form(U[:, i], P)) for i in range(N)]) term2 = dd.T * cv.sqrt(cv.diag(U.T * P * U)) term3 = cv.trace(UUT * P) obj = 1 / N * cv.Minimize(term1 - 2 * term2 + term3) return obj
def find_LF_opt(cons_eq,b): C = [ a[:-1] for a in cons_eq] #C = [ [ (-1)**(b<0)*sqrt(abs(b)) for b in a[:-1] ] for a in cons_eq] dt = [a[-1] for a in cons_eq] N = len(b) x = cp.Variable(N) # nu y = cp.Variable() # mu param = cp.Parameter((1,len(b)),nonneg=True,value=np.reshape(np.array(b),(1,len(b)))) #objective = cp.Minimize(param*(x-cp.log(x))) objective = cp.Minimize(cp.sqrt(param)*x-param*cp.log(x)) constraints = [np.array([MIN_NU]*N) <= x, MIN_MU <= y, csr_matrix(C)*x+y*np.array(dt)==0] prob = cp.Problem(objective, constraints) fx = prob.solve(verbose=True) #,mosek_params={'MSK_DPAR_BASIS_REL_TOL_S':1e-20,'MSK_DPAR_BASIS_TOL_S':1e-9,'MSK_DPAR_BASIS_TOL_X':1e-9}) x_opt = np.append([ x/sqrt(a) for (x,a) in zip (x.value,b) ],y.value) return x_opt,fx
def test_power(self) -> None:
    """Test domain for power.
    """
    # sqrt: domain restricts a >= 0, so minimising a over it gives 0.
    dom = cp.sqrt(self.a).domain
    Problem(Minimize(self.a), dom).solve(solver=cp.SCS, eps=1e-6)
    self.assertAlmostEqual(self.a.value, 0)

    # square: only the explicit lower bound binds.
    dom = cp.square(self.a).domain
    Problem(Minimize(self.a), dom + [self.a >= -100]).solve(solver=cp.SCS,
                                                            eps=1e-6)
    self.assertAlmostEqual(self.a.value, -100)

    # a**-1: the domain keeps the minimiser at 0 despite the bound.
    dom = (self.a ** -1).domain
    Problem(Minimize(self.a), dom + [self.a >= -100]).solve(solver=cp.SCS,
                                                            eps=1e-6)
    self.assertAlmostEqual(self.a.value, 0)

    # a**3: likewise minimised at 0.
    dom = (self.a ** 3).domain
    Problem(Minimize(self.a), dom + [self.a >= -100]).solve(solver=cp.SCS,
                                                            eps=1e-6)
    self.assertAlmostEqual(self.a.value, 0)
# Sweep risk-aversion values `mus`, solving the Markowitz problem twice per
# value: once long-only and once allowing bounded short positions, then plot
# both risk/return trade-off curves.
# NOTE(review): relies on module-level x, mus, pbarm, S and n, and assumes
# len(mus) == n — confirm against the surrounding script.
mean_long = np.zeros(n)
std_long = np.zeros(n)
mean_totalshort = np.zeros(n)
std_totalshort = np.zeros(n)
constraints_long = [cvx.sum_entries(x) == 1, x >= 0]
constraints_totalshort = [cvx.sum_entries(x) == 1]
for i in range(n):
    # Each short position is limited to 0.5 in magnitude.
    constraints_totalshort += [1 * cvx.neg(x[i]) <= 0.5]
for i, mu in enumerate(mus):
    #print('mu = {}', format(mu))
    # Minimise -return + mu * risk (risk-aversion scalarisation).
    objective = cvx.Minimize(-pbarm.T*x + mu * cvx.quad_form(x, S))
    #Long-only
    prob_long = cvx.Problem(objective, constraints_long)
    prob_long.solve()
    #print('status ={}',prob_long.status)
    mean_long[i] = (pbarm.T*x).value  #return
    std_long[i] = cvx.sqrt(cvx.quad_form(x, S)).value  #sqrt(risk)
    #Total short
    prob_totalshort = cvx.Problem(objective, constraints_totalshort)
    prob_totalshort.solve()
    #print('status ={}',prob_long.status)
    mean_totalshort[i] = (pbarm.T*x).value  #return
    std_totalshort[i] = cvx.sqrt(cvx.quad_form(x, S)).value  #sqrt(risk)
plt.plot(std_long, mean_long, label='long-only')
plt.plot(std_totalshort, mean_totalshort, label='total short')
plt.xlabel('standard deviation of return')
plt.ylabel('mean return')
plt.legend(loc='lower right', frameon=False);
def cvx_eval(self, x):
    """Evaluate the utility sum a_g * sqrt(x_g) over all groups.

    Only rho == 0.5 (the sqrt case) is implemented.
    """
    if self.rho != .5:
        raise ValueError('need to implement general powers.')
    total = 0
    for g in self.a:
        total += self.a[g] * cvx.sqrt(x[g])
    return total
def tps_fit3_normals_cvx(x_na, y_ng, bend_coef, rot_coef, normal_coef, wt_n,
                         nwsize=0.02, use_dot=False):
    """Fit a 3D thin-plate spline mapping x_na -> y_ng with an extra
    surface-normal matching term, solved as a CVXPY problem.

    Returns (B, c, A): the affine matrix, the translation vector, and the
    TPS kernel weights.
    NOTE(review): written against the old cvxpy/cvxopt matrix API
    (positional Variable shapes, co.matrix operands, xrange) — Python 2 era.
    """
    if wt_n is None:
        wt_n = np.ones(len(x_na))
    n, d = x_na.shape
    K_nn = tps.tps_kernel_matrix(x_na)
    # Null-space basis of [x | 1]^T, used to enforce TPS orthogonality.
    _, _, VT = nlg.svd(np.c_[x_na, np.ones((x_na.shape[0], 1))].T)
    Nmat = VT.T[:, d+1:]
    rot_coefs = np.diag(np.ones(d) * rot_coef if np.isscalar(rot_coef) else rot_coef)
    # Generate the normals
    e_x = tps_utils.find_all_normals_naive(x_na, nwsize, flip_away=True,
                                           project_lower_dim=True)
    e_y = tps_utils.find_all_normals_naive(y_ng, nwsize, flip_away=True,
                                           project_lower_dim=True)
    if d == 3:
        x_diff = np.transpose(x_na[None, :, :] - x_na[:, None, :], (0, 2, 1))
        Pmat = e_x.dot(x_diff)[range(n), range(n), :]/(K_nn+1e-20)
    else:
        raise NotImplementedError
    A = cp.Variable(Nmat.shape[1], d)
    B = cp.Variable(d, d)
    c = cp.Variable(d, 1)
    # Wrap all constants as cvxopt matrices for the old cvxpy API.
    X = co.matrix(x_na)
    Y = co.matrix(y_ng)
    EX = co.matrix(e_x)
    EY = co.matrix(e_y)
    K = co.matrix(K_nn)
    N = co.matrix(Nmat)
    P = co.matrix(Pmat)
    W = co.matrix(np.diag(wt_n))
    R = co.matrix(rot_coefs)
    ones = co.matrix(np.ones((n, 1)))
    constraints = []
    # For correspondences: V1 is the fit residual, V2 its weighted form.
    V1 = cp.Variable(n, d)
    constraints.append(V1 == Y - K*N*A - X*B - ones*c.T)
    V2 = cp.Variable(n, d)
    constraints.append(V2 == cp.sqrt(W)*V1)
    # For normals
    if use_dot:
        # import IPython
        # IPython.embed()
        N1 = cp.Variable(n, n)
        constraints.append(N1 == (P*N*A - EX*B)*EY.T)
        # N2 = cp.Variable(n)
        # constraints.extend([N2[i] == N1[i,i] for i in xrange(n)])
    else:
        N1 = cp.Variable(n, d)
        constraints.append(N1 == EY - P*N*A - EX*B)
        N2 = cp.Variable(n, d)
        constraints.append(N2 == cp.sqrt(W)*N1)
    # For bending cost
    Vb = []
    Q = []  # for quadratic forms
    for i in range(d):
        Vb.append(cp.Variable(Nmat.shape[1], 1))
        constraints.append(Vb[-1] == A[:, i])
        Q.append(cp.quad_form(Vb[-1], N.T*K*N))
    # For rotation cost
    V3 = cp.Variable(d, d)
    constraints.append(V3 == cp.sqrt(R)*B)
    # Orthogonality constraints for bending
    constraints.extend([X.T*A == 0, ones.T*A == 0])
    # TPS objective
    if use_dot:
        objective = cp.Minimize(cp.sum_squares(V2)
                                - normal_coef*sum([N1[i, i] for i in xrange(n)])
                                + bend_coef*sum(Q) + cp.sum_squares(V3))
    else:
        objective = cp.Minimize(cp.sum_squares(V2)
                                + normal_coef*cp.sum_squares(N2)
                                + bend_coef*sum(Q) + cp.sum_squares(V3))
    p = cp.Problem(objective, constraints)
    p.solve()
    # import IPython
    # IPython.embed()
    return np.array(B.value), np.squeeze(np.array(c.value)), np.array(A.value)
# -- Variational upper bound on a log-partition function (script fragment) --
# NOTE(review): Python 2 source (print statements); relies on module-level
# N, b, A, log_2_pi and log_normcdf — confirm against the full script.
tau_1 = cvx.Variable(N)
#tau_2 = cvx.Variable(N)
tau_2 = np.zeros(N)  # tau_2 held fixed at zero for this run
print tau_2, b
print b.shape, tau_2.shape
#inv_alpha_1 = cvx.Variable(1)
inv_alpha_1 = 0.5  # mixing weight held fixed at 1/2
# specify objective function
#obj = cvx.Minimize(-cvx.sum_entries(cvx.log(tau_1)) - cvx.log_det(A - cvx.diag(tau_1)))
#obj = cvx.Minimize(-cvx.sum_entries(cvx.log(tau_1)) - cvx.log_det(A - cvx.diag(tau_1)))
# original
#obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(N*cvx.log(1/inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.sum_entries(cvx.square(tau_2)/tau_1) + inv_alpha_1*cvx.sum_entries(log_normcdf(tau_2*cvx.sqrt(1/(inv_alpha_1*tau_1)))) +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )
# modifications
#obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(-N*cvx.log(inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.matrix_frac(tau_2, cvx.diag(tau_1)) + inv_alpha_1*cvx.sum_entries(log_normcdf(tau_2.T*cvx.inv_pos(cvx.sqrt(inv_alpha_1*tau_1)))) +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )
# Active objective: only the first four terms are kept; the remaining
# log_det / matrix_frac terms are commented out at the end of the line.
obj = cvx.Minimize( 0.5*N*(inv_alpha_1-1)*log_2_pi - 0.5*inv_alpha_1*(-N*cvx.log(inv_alpha_1) + cvx.sum_entries(cvx.log(tau_1))) + 0.5*cvx.matrix_frac(tau_2, cvx.diag(tau_1)) + cvx.sum_entries(inv_alpha_1*log_normcdf(cvx.inv_pos(cvx.sqrt(inv_alpha_1*tau_1)))) )# +0.5*N*(1-inv_alpha_1)*cvx.log(1-inv_alpha_1) -0.5*(1-inv_alpha_1)*cvx.log_det(A-cvx.diag(tau_1)) + 0.5*cvx.matrix_frac(b-tau_2, A-cvx.diag(tau_1)) )
# Reference NumPy implementation of the bound (commented out; continues
# beyond this fragment):
#def upper_bound_logpartition(tau, inv_alpha_1):
#    tau_1, tau_2 = tau[:D+N], tau[D+N:]
#    tau_1_N, tau_2_N = tau_1[D:], tau_2[D:]  # first D values correspond to w
#    alpha_1 = 1.0 / inv_alpha_1
#    inv_alpha_2 = 1 - inv_alpha_1
#    if np.any(tau_1 <= 0):
#        integral_1 = INF2
#    else:
#        integral_1 = inv_alpha_1 * (-0.5 * ((D+N)*np.log(alpha_1) + np.sum(np.log(tau_1)) ) \
#                     + np.sum(norm.logcdf(np.sqrt(alpha_1)*tau_2_N/np.sqrt(tau_1_N)))) \
#                     + 0.5 * np.sum(np.power(tau_2, 2) / tau_1)
#    mat = A - np.diag(tau_1)
#    sign, logdet = np.linalg.slogdet(mat)
def clear_FTR_market():
    """Based on current FTR bidding strategies, clear the market.

    Builds and solves a cvxpy problem allocating Financial Transmission
    Rights between two producers (Henk and Bert), subject to line-capacity
    constraints derived from the PTDF matrix in the global module ``gv``.

    Returns a 3-tuple ``(WtP, Henk_buys, Bert_buys)``:
      WtP        -- node-by-node matrix of willingness-to-pay prices
      Henk_buys  -- list of [allocation value, from-node, to-node] per FTR
      Bert_buys  -- same for the second producer
    """
    number_of_nodes = len(gv.nodes) + len(gv.sources)
    # FTR_price is initialized but never filled in; the FTR_price return is
    # commented out at the bottom -- presumably superseded by WtP.
    FTR_price = np.zeros((number_of_nodes,number_of_nodes))
    # One allocation variable per FTR, per producer (legacy cvxpy vector Variable).
    allocation_HENK = cp.Variable(len(gv.FTR_list))
    allocation_BERT = cp.Variable(len(gv.FTR_list))
    # Combined allocation per FTR (sum of both producers' positions).
    allocation = []
    for i in range(len(gv.FTR_list)):
        allocation.append (allocation_HENK[i] + allocation_BERT[i])
    Henk = gv.producers[0]
    Bert = gv.producers[1]
    constraints = []
    # Line-capacity constraints: the net flow induced on each line by all
    # allocated FTRs (via PTDF sensitivities) must stay within capacity in
    # both directions.  FTRs touching the slack node contribute through a
    # single PTDF column; all others through the difference of two columns.
    for line in gv.edges:
        this_constraint = 0
        for ftr_index, ftr in enumerate(gv.FTR_list):
            if ftr[0] == gv.slack:
                term = -1 * gv.ptdf_matrix[line.uid,ftr[1]] * allocation[ftr_index]
                this_constraint += term
            elif ftr[1] == gv.slack:
                term = gv.ptdf_matrix[line.uid,ftr[0]] * allocation[ftr_index]
                this_constraint += term
            else:
                term = (gv.ptdf_matrix[line.uid,ftr[0]] - gv.ptdf_matrix[line.uid,ftr[1]]) * allocation[ftr_index]
                this_constraint += term
        pos_addition = [this_constraint <= line.capacity]
        neg_addition = [-1 * this_constraint <= line.capacity]
        constraints.extend(pos_addition)
        constraints.extend(neg_addition)
    # Sign constraints: allocations are non-negative; an FTR whose expected
    # value (eps[index][0]) is negative for a producer is pinned to zero.
    # NOTE(review): iterating / len()-ing a cvxpy Variable relies on the
    # legacy cvxpy 0.x API -- confirm before upgrading cvxpy.
    for index, i in enumerate(allocation_HENK):
        new = [i >= 0]
        if Henk.eps[index][0] < 0:
            new = [i == 0]
        constraints.extend(new)
    for index, i in enumerate(allocation_BERT):
        new = [i >= 0]
        if Bert.eps[index][0] < 0:
            new = [i == 0]
        constraints.extend(new)
    #Time to set up Objective function
    # Each producer's gain: linear expected value minus a quadratic penalty
    # (eps[i][1] acts as a risk/cost coefficient; sqrt then square keeps the
    # expression DCP-convex).
    gain_HENK = sum(Henk.eps[i][0] * allocation_HENK[i] for i in range(len(allocation_HENK))) - \
        sum(cp.square(cp.sqrt(Henk.eps[i][1]) * allocation_HENK[i]) for i in range(len(allocation_HENK)))
    gain_BERT = sum(Bert.eps[i][0] * allocation_BERT[i] for i in range(len(allocation_BERT))) - \
        sum(cp.square(cp.sqrt(Bert.eps[i][1]) * allocation_BERT[i]) for i in range(len(allocation_BERT)))
    total_gain = gain_BERT + gain_HENK
    #Solve the problem
    p = cp.Problem(cp.Maximize(total_gain), constraints)
    p.solve()
    #Determine willingness to pay to set price.
    # Marginal value d(gain)/d(allocation) = eps[0] - 2*eps[1]*allocation,
    # evaluated at the optimum; price is the larger producer's marginal value,
    # floored at 0 and rounded to cents.  (xrange: Python 2.)
    WtP = np.zeros((number_of_nodes,number_of_nodes))
    for i in xrange(len(allocation_HENK)):
        WtP[gv.FTR_list[i][0],gv.FTR_list[i][1]] = round(max(0,max(Henk.eps[i][0] - \
            2 * Henk.eps[i][1] * allocation_HENK[i].value, Bert.eps[i][0] - \
            2 * Bert.eps[i][1] * allocation_BERT[i].value)),2)
    FTRs = create_FTR_list()
    Henk_buys = []
    Bert_buys = []
    for index,item in enumerate(allocation_HENK):
        newitem = [item.value,FTRs[index][0],FTRs[index][1]]
        Henk_buys.append(newitem)
    for index,item in enumerate(allocation_BERT):
        newitem = [item.value,FTRs[index][0],FTRs[index][1]]
        Bert_buys.append(newitem)
    #return FTR_price, Henk_buys, Bert_buys
    return WtP, Henk_buys, Bert_buys
def tps_fit_normals_cvx(x_na, y_ng, e_x = None, e_y = None, bend_coef=0.1, rot_coef=1e-5, normal_coef = 0.1, wt_n=None, delta=0.0001, nwsize=0.02):
    """
    Fit a thin-plate-spline that matches points AND surface normals, all at
    once, via cvxpy.

    x_na, y_ng    -- (n, d) source / target point arrays
    e_x, e_y      -- (n, d) source / target normals; estimated from the point
                     clouds when None
    bend_coef     -- weight on the TPS bending energy
    rot_coef      -- weight(s) on the rotation (linear-part) penalty; scalar
                     or per-dimension
    normal_coef   -- weight on the normal-matching residual
    wt_n          -- per-point weights (defaults to all ones)
    delta         -- edge length used in the derivative-kernel diagonal
    nwsize        -- neighborhood size for normal estimation
    Returns a registration.ThinPlateSplineNormals with fitted
    w_ng / wn_ng / lin_ag / trans_g.
    """
    n,d = x_na.shape
    if wt_n is None:
        wt_n = co.matrix(np.ones(len(x_na)))

    # Estimate normals when the caller did not supply them.
    if e_x is None:
        e_x = tu.find_all_normals_naive(x_na, nwsize, flip_away=True, project_lower_dim=(d==3))
    if e_y is None:
        e_y = tu.find_all_normals_naive(y_ng, nwsize, flip_away=True, project_lower_dim=(d==3))

    K_nn = tu.tps_kernel_mat(x_na)
    Qmat = np.c_[np.ones((n,1)),x_na]

    # Mmat: first derivatives of the kernel along source normals;
    # Pmat: second derivatives (normal-normal interaction), symmetric.
    # Get rid of these for loops at some point.
    Mmat = np.zeros((n,n))
    Pmat = np.zeros((n,n))
    for i in range(n):
        pi, ni = x_na[i,:], e_x[i,:]
        for j in range(n):
            if i == j:
                Mmat[i,i] = Pmat[i,i] = 0
            else:
                pj, nj = x_na[j,:], e_x[j,:]
                Mmat[i,j] = tu.deriv_U(pj,pi,nj,d)
                if i < j:
                    Pmat[i,j] = Pmat[j,i] = tu.deriv2_U(pi,pj,nj,ni,d)

    # Diagonal regularization of the derivative kernel at edge length delta.
    DKmat = -2*(np.diag([np.log(delta)]*n)) - Pmat
    Emat = np.r_[np.c_[K_nn, Mmat],np.c_[Mmat.T, DKmat]]

    # Work in the null space of the orthogonality constraints, so those
    # constraints never need to be imposed on the cvxpy problem.
    OCmat = np.r_[np.c_[x_na,np.ones((x_na.shape[0],1))], np.c_[e_x,np.zeros((e_x.shape[0],1))]].T
    _,_,VT = nlg.svd(OCmat)
    NSmat = VT.T[:,d+1:] # null space basis

    rot_coefs = np.diag(np.ones(d) * rot_coef if np.isscalar(rot_coef) else rot_coef)

    A = cp.Variable(NSmat.shape[1],d) # stacked form of f.w_ng and f.wn_ng
    B = cp.Variable(d,d) #f.lin_ag
    c = cp.Variable(d,1) #f.trans_g

    X = co.matrix(x_na)
    Y = co.matrix(y_ng)
    EX = co.matrix(e_x)
    EY = co.matrix(e_y)
    NS = co.matrix(NSmat) # working in the null space of the constraints
    KM = co.matrix(np.c_[K_nn, Mmat])
    MDK = co.matrix(np.c_[Mmat.T,DKmat])
    E = co.matrix(Emat)
    W = co.matrix(np.diag(wt_n))
    R = co.matrix(rot_coefs)
    ones = co.matrix(np.ones((n,1)))

    constraints = []
    # For correspondences: weighted point residual.
    V1 = cp.Variable(n,d)
    constraints.append(V1 == KM*NS*A+X*B+ones*c.T - Y)
    V2 = cp.Variable(n,d)
    constraints.append(V2 == cp.sqrt(W)*V1)
    # For normals: weighted normal residual.
    N1 = cp.Variable(n,d)
    constraints.append(N1 == MDK*NS*A+EX*B - EY)
    N2 = cp.Variable(n,d)
    constraints.append(N2 == cp.sqrt(W)*N1)
    # For bending cost: one quadratic form per output dimension.
    Quad = []
    for i in range(d):
        Quad.append(cp.quad_form(A[:,i], NS.T*E*NS))
    # For rotation cost: element-wise sqrt is valid since R is diagonal, >= 0.
    V3 = cp.Variable(d,d)
    constraints.append(V3 == cp.sqrt(R)*B)
    # Orthogonality constraints for bending are unnecessary: we parameterize
    # A in the null space of OCmat, so they hold by construction.

    # TPS objective
    objective = cp.Minimize(cp.sum_squares(V2) + normal_coef*cp.sum_squares(N2) + bend_coef*sum(Quad) + cp.sum_squares(V3))

    p = cp.Problem(objective, constraints)
    p.solve(verbose=True)

    # Map the null-space coefficients back to the full kernel weights.
    Aval = NSmat.dot(np.array(A.value))
    fn = registration.ThinPlateSplineNormals(d)
    fn.x_na, fn.n_na = x_na, e_x
    fn.w_ng, fn.wn_ng = Aval[0:n,:], Aval[n:,:]
    fn.trans_g, fn.lin_ag = np.squeeze(np.array(c.value)), np.array(B.value)
    return fn
def tps_fit_normals_exact_cvx(x_na, y_ng, e_x = None, e_y = None, bend_coef=0.1, rot_coef=1e-5, normal_coef = 0.1, wt_n=None, delta=0.0001, nwsize=0.02):
    """
    Solves as basic a problem as possible from Bookstein --> no limits taken.
    Fits normals and points all at once, representing each normal by a finite
    difference between two offset point sets at edge length delta.

    Parameters mirror tps_fit_normals_cvx; normal_coef and wt_n are accepted
    for interface compatibility but are not used in this exact formulation.
    Returns a registration.ThinPlateSplineNormals with fitted
    w_ng / trans_g / lin_ag / wn_ng.
    """
    n,d = x_na.shape
    if wt_n is None:
        wt_n = co.matrix(np.ones(len(x_na)))

    # Estimate normals when the caller did not supply them.
    if e_x is None:
        e_x = tu.find_all_normals_naive(x_na, nwsize, flip_away=True, project_lower_dim=(d==3))
    if e_y is None:
        e_y = tu.find_all_normals_naive(y_ng, nwsize, flip_away=True, project_lower_dim=(d==3))

    # Offset point sets used to finite-difference the normals.
    xs_na = x_na# - e_x*delta/2
    xf_na = x_na + e_x*delta#/2

    # Kernel matrices between the original and offset point sets.
    Kmat = tps.tps_kernel_matrix(x_na)
    K1mat = tps.tps_kernel_matrix2(x_na, xs_na)
    K2mat = tps.tps_kernel_matrix2(x_na, xf_na)
    K12mat = tps.tps_kernel_matrix2(xs_na, xf_na)
    K11mat = tps.tps_kernel_matrix(xs_na)
    K22mat = tps.tps_kernel_matrix(xf_na)

    Qmat = np.c_[np.ones((n,1)),x_na]
    Q1mat = np.c_[np.ones((n,1)),xs_na]
    Q2mat = np.c_[np.ones((n,1)),xf_na]
    M1mat = np.r_[K1mat,Q1mat.T]
    M2mat = np.r_[K2mat,Q2mat.T]

    # Finite-difference operators at edge length delta.
    Dmat_inv = np.diag([1.0/delta]*n)
    MDmat = (M2mat - M1mat).dot(Dmat_inv)
    DKmat = Dmat_inv.dot(K11mat + K22mat - K12mat - K12mat.T).dot(Dmat_inv)

    Lmat = np.r_[np.c_[Kmat,Qmat],np.c_[Qmat.T,np.zeros((d+1,d+1))]]
    LEmat = np.r_[np.c_[Lmat, MDmat], np.c_[MDmat.T, DKmat]]

    # Work with the kernel (null space) of the orthogonality constraints.
    OCmat = np.r_[np.c_[x_na,np.ones((x_na.shape[0],1))], np.zeros((d+1,d+1)), np.c_[e_x,np.zeros((e_x.shape[0],1))]].T
    _,_,VT = nlg.svd(OCmat)
    NSmat = VT.T[:,d+1:] # null space basis

    rot_coefs = np.diag(np.ones(d) * rot_coef if np.isscalar(rot_coef) else rot_coef)

    # Problem setup:
    A = cp.Variable(NSmat.shape[1],d) #f.w_ng
    R = co.matrix(slg.block_diag(np.zeros((n+1,n+1)),rot_coefs, np.zeros((n,n))))
    NS = co.matrix(NSmat) # working in the null space of the constraints
    Y_EY = co.matrix(np.r_[y_ng,np.zeros((d+1,d)),e_y])
    LE = co.matrix(LEmat)

    constraints = []
    # Combined point + normal residual.
    V1 = cp.Variable(2*n+d+1,d)
    constraints.append(V1 == LE*NS*A - Y_EY)
    # Bend cost: one quadratic form per output dimension.
    Quad = []
    for i in range(d):
        Quad.append(cp.quad_form(A[:,i], NS.T*LE*NS))
    # Rotation penalty (element-wise sqrt is valid: R diagonal, >= 0).
    V2 = cp.Variable(2*n+d+1,d)
    constraints.append(V2 == cp.sqrt(R)*NS*A)

    # TPS objective.
    # NOTE(review): R already embeds rot_coefs, yet the last term is scaled by
    # rot_coef again -- the rotation penalty may be weighted twice; confirm.
    objective = cp.Minimize(cp.sum_squares(LE*NS*A - Y_EY) + bend_coef*sum(Quad) + rot_coef*cp.sum_squares(cp.sqrt(R)*NS*A))

    p = cp.Problem(objective, constraints)
    p.solve(verbose=True)

    # Map null-space coefficients back to the full parameter vector and
    # unpack its blocks: kernel weights, translation, linear part, normal weights.
    Aval = NSmat.dot(np.array(A.value))
    fn = registration.ThinPlateSplineNormals(d)
    fn.x_na, fn.n_na = x_na, e_x
    fn.w_ng, fn.trans_g, fn.lin_ag, fn.wn_ng = Aval[:n,:], Aval[n,:], Aval[n+1:n+1+d,:], Aval[n+1+d:,:]
    return fn
def tps_fit3_cvx(x_na, y_ng, bend_coef, rot_coef, wt_n):
    """
    Solve the weighted, regularized TPS fitting problem with cvxpy instead of
    a direct matrix solve.

    The kernel weights are parameterized by a basis of the null space of the
    orthogonality constraints, so those constraints hold by construction and
    never appear in the problem.

    x_na, y_ng -- (n, d) source / target points
    bend_coef  -- weight on the bending energy
    rot_coef   -- rotation penalty; scalar or per-dimension
    wt_n       -- per-point weights (all ones when None)
    Returns (lin_ag, trans_g, w_ng) as numpy arrays.
    """
    if wt_n is None:
        wt_n = co.matrix(np.ones(len(x_na)))
    n, d = x_na.shape

    kern = tps_kernel_matrix(x_na)
    # Null-space basis of [x | 1]^T via SVD.
    _, _, vt = nlg.svd(np.c_[x_na, np.ones((x_na.shape[0], 1))].T)
    null_basis = vt.T[:, d+1:]
    if np.isscalar(rot_coef):
        rot_diag = np.diag(np.ones(d) * rot_coef)
    else:
        rot_diag = np.diag(rot_coef)

    # Decision variables: null-space coefficients, linear part, translation.
    coef_var = cp.Variable(null_basis.shape[1], d)
    lin_var = cp.Variable(d, d)
    trans_var = cp.Variable(d, 1)

    # Constant matrices in cvxopt form.
    Y = co.matrix(y_ng)
    K = co.matrix(kern)
    N = co.matrix(null_basis)
    X = co.matrix(x_na)
    W = co.matrix(np.diag(wt_n).copy())
    R = co.matrix(rot_diag)
    ones = co.matrix(np.ones((n, 1)))

    constraints = []

    # Weighted correspondence residual.
    resid = cp.Variable(n, d)
    constraints.append(resid == Y - K*N*coef_var - X*lin_var - ones*trans_var.T)
    wresid = cp.Variable(n, d)
    constraints.append(wresid == cp.sqrt(W)*resid)

    # Bending energy: one quadratic form per output dimension.
    bend_terms = [cp.quad_form(coef_var[:, i], N.T*K*N) for i in range(d)]

    # Rotation penalty -- element-wise sqrt works because R is diagonal and
    # non-negative.
    rot_pen = cp.Variable(d, d)
    constraints.append(rot_pen == cp.sqrt(R)*lin_var)

    # Orthogonality constraints are already satisfied: coef_var lives in the
    # null space of [x | 1]^T.

    objective = cp.Minimize(cp.sum_squares(wresid) + bend_coef*sum(bend_terms) + cp.sum_squares(rot_pen))
    prob = cp.Problem(objective, constraints)
    prob.solve(verbose=True)

    return np.array(lin_var.value), np.squeeze(np.array(trans_var.value)), null_basis.dot(np.array(coef_var.value))