            c <= 5000,
            a + b + c <= 10000,
        ]
    else:
        # set condition first half of year
        constraints = [
            a >= 2000,
            b >= 2000,
            c >= 2000,
            a <= 4000,
            b <= 4500,
            a + b + c <= 10000,
        ]

    # set problem
    obj = cp.Maximize(p_a * a + p_b * b + p_c * c)
    prob = cp.Problem(obj, constraints)

    # solve
    prob.solve()

    # result (every month product cnt)
    a_cnt = round(float(a.value))
    b_cnt = round(float(b.value))
    c_cnt = round(float(c.value))

    # set per month cnt
    a_month_cnt.append(a_cnt)
    b_month_cnt.append(b_cnt)
    c_month_cnt.append(c_cnt)
def np_simul_integerizer_cvx(sub_int_weights, parent_countrol_importance, parent_relax_ge_upper_bound, sub_countrol_importance, sub_float_weights, sub_resid_weights, lp_right_hand_side, parent_hh_constraint_ge_bound, sub_incidence, parent_incidence, total_hh_right_hand_side, relax_ge_upper_bound, parent_lp_right_hand_side, hh_constraint_ge_bound, parent_resid_weights, total_hh_sub_control_index, total_hh_parent_control_index): """ Parameters ---------- sub_int_weights : numpy.ndarray(sub_zone_count, sample_count) int parent_countrol_importance : numpy.ndarray(parent_control_count,) float parent_relax_ge_upper_bound : numpy.ndarray(parent_control_count,) float sub_countrol_importance : numpy.ndarray(sub_control_count,) float sub_float_weights : numpy.ndarray(sub_zone_count, sample_count) float sub_resid_weights : numpy.ndarray(sub_zone_count, sample_count) float lp_right_hand_side : numpy.ndarray(sub_zone_count, sub_control_count) float parent_hh_constraint_ge_bound : numpy.ndarray(parent_control_count,) float sub_incidence : numpy.ndarray(sample_count, sub_control_count) float parent_incidence : numpy.ndarray(sample_count, parent_control_count) float total_hh_right_hand_side : numpy.ndarray(sub_zone_count,) float relax_ge_upper_bound : numpy.ndarray(sub_zone_count, sub_control_count) float parent_lp_right_hand_side : numpy.ndarray(parent_control_count,) float hh_constraint_ge_bound : numpy.ndarray(sub_zone_count, sub_control_count) float parent_resid_weights : numpy.ndarray(sample_count,) float total_hh_sub_control_index : int total_hh_parent_control_index : int Returns ------- resid_weights_out : numpy.ndarray of float residual weights in range [0..1] as solved, or, in case of failure, sub_resid_weights unchanged status_text : string STATUS_OPTIMAL, STATUS_FEASIBLE in case of success, or a solver-specific failure status """ import cvxpy as cvx STATUS_TEXT = { cvx.OPTIMAL: 'OPTIMAL', cvx.INFEASIBLE: 'INFEASIBLE', cvx.UNBOUNDED: 'UNBOUNDED', cvx.OPTIMAL_INACCURATE: 'FEASIBLE', # for compatability with ortools cvx.INFEASIBLE_INACCURATE: 'INFEASIBLE_INACCURATE', cvx.UNBOUNDED_INACCURATE: 'UNBOUNDED_INACCURATE', None: 'FAILED' } CVX_MAX_ITERS = 1000 sample_count, sub_control_count = sub_incidence.shape _, parent_control_count = parent_incidence.shape sub_zone_count, _ = sub_float_weights.shape # - Decision variables for optimization x = cvx.Variable(sub_zone_count, sample_count) # x range is 0.0 to 1.0 unless resid_weights is zero, in which case constrain x to 0.0 x_max = (~(sub_float_weights == sub_int_weights)).astype(float) # - Create positive continuous constraint relaxation variables relax_le = cvx.Variable(sub_zone_count, sub_control_count) relax_ge = cvx.Variable(sub_zone_count, sub_control_count) parent_relax_le = cvx.Variable(parent_control_count) parent_relax_ge = cvx.Variable(parent_control_count) # - Set objective # could probably ignore as handled by constraint sub_countrol_importance[total_hh_sub_control_index] = 0 parent_countrol_importance[total_hh_parent_control_index] = 0 LOG_OVERFLOW = -725 log_resid_weights = np.log( np.maximum(sub_resid_weights, np.exp(LOG_OVERFLOW))).flatten('F') assert not np.isnan(log_resid_weights).any() log_parent_resid_weights = \ np.log(np.maximum(parent_resid_weights, np.exp(LOG_OVERFLOW))).flatten('F') assert not np.isnan(log_parent_resid_weights).any() # subzone and parent objective and relaxation penalties # note: cvxpy overloads * so * in following is matrix multiplication objective = cvx.Maximize( 
cvx.sum_entries(cvx.mul_elemwise(log_resid_weights, cvx.vec(x))) + cvx.sum_entries( cvx.mul_elemwise(log_parent_resid_weights, cvx.vec(cvx.sum_entries(x, axis=0)))) - # nopep8 cvx.sum_entries(relax_le * sub_countrol_importance) - cvx.sum_entries(relax_ge * sub_countrol_importance) - cvx.sum_entries( cvx.mul_elemwise(parent_countrol_importance, parent_relax_le)) - cvx.sum_entries( cvx.mul_elemwise(parent_countrol_importance, parent_relax_ge))) constraints = [ (x * sub_incidence) - relax_le >= 0, (x * sub_incidence) - relax_le <= lp_right_hand_side, (x * sub_incidence) + relax_ge >= lp_right_hand_side, (x * sub_incidence) + relax_ge <= hh_constraint_ge_bound, x >= 0.0, x <= x_max, relax_le >= 0.0, relax_le <= lp_right_hand_side, relax_ge >= 0.0, relax_ge <= relax_ge_upper_bound, # - equality constraint for the total households control cvx.sum_entries(x, axis=1) == total_hh_right_hand_side, cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le >= 0, # nopep8 cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) - parent_relax_le <= parent_lp_right_hand_side, # nopep8 cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) + parent_relax_ge >= parent_lp_right_hand_side, # nopep8 cvx.vec(cvx.sum_entries(x, axis=0) * parent_incidence) + parent_relax_ge <= parent_hh_constraint_ge_bound, # nopep8 parent_relax_le >= 0.0, parent_relax_le <= parent_lp_right_hand_side, parent_relax_ge >= 0.0, parent_relax_ge <= parent_relax_ge_upper_bound, ] prob = cvx.Problem(objective, constraints) assert CVX_SOLVER in cvx.installed_solvers(), \ "CVX Solver '%s' not in installed solvers %s." % ( CVX_SOLVER, cvx.installed_solvers()) logger.info("simul_integerizing with '%s' solver." % CVX_SOLVER) try: prob.solve(solver=CVX_SOLVER, verbose=True, max_iters=CVX_MAX_ITERS) except cvx.SolverError as e: logging.warning('Solver error in SimulIntegerizer: %s' % e) # if we got a result if np.any(x.value): resid_weights_out = np.asarray(x.value) else: resid_weights_out = sub_resid_weights status_text = STATUS_TEXT[prob.status] return resid_weights_out, status_text
def tight_infer_with_partial_graph(y_val, s1_val, s0_val, oa, oe, om): partial_cbn = load_xml_to_cbn(partial_model) partial_cbn.build_joint_table() Age = partial_cbn.v['age'] Edu = partial_cbn.v['education'] Sex = partial_cbn.v['sex'] Workclass = partial_cbn.v['workclass'] Marital = partial_cbn.v['marital-status'] Hours = partial_cbn.v['hours'] Income = partial_cbn.v['income'] if s1_val == s0_val: # there is no difference when active value = reference value return 0.00, 0.00 else: # define variable for P(r) PR = cvx.Variable(Marital.domain_size**8) # define ell functions g = {} for v in {Marital}: v_index = v.index v_domain_size = v.domain_size parents_index = partial_cbn.index_graph.pred[v_index].keys() parents_domain_size = np.prod( [partial_cbn.v[i].domain_size for i in parents_index]) g[v_index] = list( product(range(v_domain_size), repeat=int(parents_domain_size))) # format # [(), (), ()] # r corresponds to the tuple # parents corresponds to the location of the tuple # assert the response function. (t function of Pearl, I function in our paper) def Indicator(obs, parents, response): # sort the parents by id par_key = parents.keys() # map the value to index par_index = 0 for k in par_key: par_index = par_index * partial_cbn.v[ k].domain_size + parents.dict[k] return 1 if obs.first_value() == g[ obs.first_key()][response][par_index] else 0 # build the object function weights = np.zeros(shape=[Marital.domain_size**8]) for rm in range(Marital.domain_size**8): # assert r -> o to obtain the conditional individuals product_i = 1 for (obs, parents, response) in [(Event({Marital: om}), Event({ Sex: s0_val, Age: oa, Edu: oe }), rm)]: product_i *= Indicator(obs, parents, response) if product_i == 1: # if ALL I()= 1, then continue the counterfactual inference # the first term for pse sum_identity = 0.0 for m1, w, h in product(Marital.domains.get_all(), Workclass.domains.get_all(), Hours.domains.get_all()): product_i = partial_cbn.get_prob(Event({Sex: s0_val}), Event({})) * \ partial_cbn.get_prob(Event({Age: oa}), Event({})) * \ partial_cbn.get_prob(Event({Edu: oe}), Event({Age: oa})) * \ Indicator(Event({Marital: m1}), Event({Sex: s1_val, Age: oa, Edu: oe}), rm) * \ partial_cbn.get_prob(Event({Workclass: w}), Event({Age: oa, Edu: oe, Marital: m1})) * \ partial_cbn.get_prob(Event({Hours: h}), Event({Workclass: w, Edu: oe, Marital: m1, Age: oa, Sex: s1_val})) * \ partial_cbn.get_prob(Event({Income: y_val}), Event({Sex: s1_val, Edu: oe, Workclass: w, Marital: m1, Hours: h, Age: oa})) sum_identity += product_i weights[rm] += sum_identity # the second term for pse sum_identity = 0.0 for m0, w, h in product(Marital.domains.get_all(), Workclass.domains.get_all(), Hours.domains.get_all()): product_i = partial_cbn.get_prob(Event({Sex: s0_val}), Event({})) * \ partial_cbn.get_prob(Event({Age: oa}), Event({})) * \ partial_cbn.get_prob(Event({Edu: oe}), Event({Age: oa})) * \ Indicator(Event({Marital: m0}), Event({Sex: s0_val, Age: oa, Edu: oe}), rm) * \ partial_cbn.get_prob(Event({Workclass: w}), Event({Age: oa, Edu: oe, Marital: m0})) * \ partial_cbn.get_prob(Event({Hours: h}), Event({Workclass: w, Edu: oe, Marital: m0, Age: oa, Sex: s0_val})) * \ partial_cbn.get_prob(Event({Income: y_val}), Event({Sex: s0_val, Edu: oe, Workclass: w, Marital: m0, Hours: h, Age: oa})) sum_identity += product_i weights[rm] -= sum_identity # build the objective function objective = weights.reshape(1, -1) @ PR / partial_cbn.get_prob( Event({ Sex: s0_val, Age: oa, Edu: oe, Marital: om })) ############################ ### to 
build the constraints ############################ ### the inferred model is consistent with the observational distribution A_mat = np.zeros( (Age.domain_size, Edu.domain_size, Marital.domain_size, Sex.domain_size, Marital.domain_size**8)) b_vex = np.zeros((Age.domain_size, Edu.domain_size, Marital.domain_size, Sex.domain_size)) # assert r -> v for a, e, m, s in product(Age.domains.get_all(), Edu.domains.get_all(), Marital.domains.get_all(), Sex.domains.get_all()): # calculate the probability of observation b_vex[a.index, e.index, m.index, s.index] = partial_cbn.get_prob( Event({ Age: a, Edu: e, Marital: m, Sex: s })) # sum of P(r) for rm in range(Marital.domain_size**8): product_i = partial_cbn.get_prob(Event({Sex: s}), Event({})) * \ partial_cbn.get_prob(Event({Age: a}), Event({})) * \ partial_cbn.get_prob(Event({Edu: e}), Event({Age: a})) * \ Indicator(Event({Marital: m}), Event({Sex: s, Age: a, Edu: e}), rm) A_mat[a.index, e.index, m.index, s.index, rm] = product_i # flatten the matrix and vector A_mat = A_mat.reshape(-1, Marital.domain_size**8) b_vex = b_vex.reshape(-1, 1) ### the probability <= 1 C_mat = np.identity(Marital.domain_size**8) d_vec = np.ones(Marital.domain_size**8) ### the probability is positive E_mat = np.identity(Marital.domain_size**8) f_vec = np.zeros(Marital.domain_size**8) constraints = [ A_mat @ PR == b_vex, C_mat @ PR <= d_vec, E_mat @ PR >= f_vec ] # minimize the causal effect problem = cvx.Problem(cvx.Minimize(objective), constraints) problem.solve() # print('tight lower effect: %f' % (problem.value)) lower = problem.value # maximize the causal effect problem = cvx.Problem(cvx.Maximize(objective), constraints) problem.solve() # print('tight upper effect: %f' % (problem.value)) upper = problem.value return upper, lower
def optimization_fun(ret, e, bench_wei, pre_w=None, is_enhance=True, lamda=10, c=0.015, turnover=None, te=None, industry_max_expose=0, risk_factor_dict={}, limit_factor_df=None, in_benchmark=True, in_benchmark_wei=0.8, max_num=None): if in_benchmark: # 如果必须在成份股内选择,则需要对风险矩阵进行处理,跳出仅是成份股的子矩阵 wei_tmp = bench_wei.dropna() bug_maybe = [i for i in wei_tmp.index if i not in e.index] if len(bug_maybe) > 0: print('存在下列股票不在组合里,请检查') print(bug_maybe) e_tmp = e.loc[wei_tmp.index, wei_tmp.index].fillna(0) ret_tmp = ret[wei_tmp.index].fillna(0) if pre_w: pre_w = pre_w[wei_tmp.index].fillna(0) else: # 确保几个重要变量有相同的index n_index = [i for i in e.index if i in ret.index] e_tmp = e.loc[n_index, n_index] ret_tmp = ret[n_index] wei_tmp = bench_wei[n_index].fillna(0) if isinstance(pre_w, pd.Series): to_test_list = len([i for i in pre_w.index if i not in n_index]) if np.any(pre_w[to_test_list] > 0.001): input('input:存在部分有权重的股票在上期,而不再当期的数据中,请检查') pre_w = pre_w[n_index].fillna(0) # 如果可以选非成份股,则可以确定一个成份股权重比例的约束条件。 is_in_bench = deepcopy(wei_tmp) is_in_bench[is_in_bench > 0] = 1 # 代表是否在成份股内的变量 data = Data() basic = data.stock_basic_inform industry_sw = basic[['申万一级行业']] # 股票组合的行业虚拟变量 industry_map = industry_sw.loc[ret_tmp.index, :] # dummies_bench = pd.get_dummies(industry_map.loc[bench_wei.index, :]) # dummies_bench.sum() 不同行业的公司数量 industry_map.fillna('综合', inplace=True) dummies = pd.get_dummies(industry_map[industry_map.columns[0]]) dummies.sum() # 个股最大权重为行业权重的 3/4 ind_wei = np.dot(dummies.T, wei_tmp) ind_wei_se = pd.Series(index=dummies.columns, data=ind_wei) industry_map['max_wei'] = None for i in industry_map.index: try: industry_map.loc[ i, 'max_wei'] = 0.75 * ind_wei_se[industry_map.loc[i, '申万一级行业']] except Exception as e: industry_map.loc[i, 'max_wei'] = 0.02 max_wei = industry_map['max_wei'].values x = cp.Variable(len(ret_tmp), nonneg=True) q = ret_tmp.values P = lamda * e_tmp.values ind_wei = np.dot(dummies.T, wei_tmp) # b.shape ind_wei_su = pd.Series(ind_wei, index=dummies.columns) dum = dummies.T.values # A.shape para_dict = { 'x': x, 'max_wei': max_wei, 'in_benchmark_wei': in_benchmark_wei, 'is_in_bench': is_in_bench, 'ret_e': ret_tmp, 'dum': dum, 'wei_tmp': wei_tmp, 'ind_wei': ind_wei, 'risk_factor_dict': risk_factor_dict, 'limit_factor_df': limit_factor_df, 'pre_w': pre_w, 'P': P, 'total_wei': 1, } con_dict = { 'in_benchmark': in_benchmark, 'industry_max_expose': industry_max_expose, 'turnover': turnover, 'te': te, } constraints = generates_constraints(para_dict, con_dict) prob = generates_problem(q, x, P, c, pre_w, constraints, te) print('开始优化...') time_start = time.time() prob.solve() status = prob.status # 如果初始条件无解,需要放松风险因子的约束 iters = 0 while status != 'optimal' and iters < 3: if len(risk_factor_dict) > 0 and iters == 0: tmp_d = deepcopy(risk_factor_dict) for k, v in tmp_d.items(): tmp_d[k] = v + 0.5 para_dict['risk_factor_dict'] = tmp_d elif not turnover and iters == 1: turnover = turnover + 0.2 con_dict['turnover'] = turnover elif iters == 2: industry_max_expose = industry_max_expose + 0.05 con_dict['industry_max_expose'] = industry_max_expose iters = iters + 1 constraints = generates_constraints(para_dict, con_dict) prob = generates_problem(q, x, P, c, pre_w, constraints, te) print('第{}次优化'.format(iters)) prob.solve() status = prob.status time_end = time.time() print('优化结束,用时', time_end - time_start) print('优化结果为{}'.format(status)) # if prob.status != 'optimal': # input('input:未得出最优解,请检查') # np.sum(x.value) # np.sum(x.value > 0.0) # np.sum(x.value > 0.001) # np.sum(x.value[x.value > 0.001]) # 
np.sum(x.value[x.value < 0.001]) # 返回值 wei_ar = np.array(x.value).flatten() # wei_ar.size wei_se = pd.Series(wei_ar, index=ret_tmp.index) # 设定标准,一般情况下无需对股票数量做二次优化,只有股票数量过多是才需要。 if np.sum(x.value > 0.001) > max_num: print('进行第二轮股票数量的优化') # wei_selected, n2, tobe_opt = select_import_wei(wei_se, max_num) tobe_opt = list(wei_se[wei_se > 0.001].index) print('第二次优化为从{}支股票中优化选择出{}支'.format(len(tobe_opt), max_num)) # 经过处理后,需要优化的计算量大幅度减少。比如第一次优化后,权重大于0.001的股票数量是135,超过最大要求的100。 # 我们首先保留其中前90,然后从后面的45个中选择10保留下来。 len(tobe_opt) e_tmp2 = e_tmp.loc[tobe_opt, tobe_opt] ret_tmp2 = ret_tmp[tobe_opt] # wei_tmp2 = wei_tmp[tobe_opt] is_in_bench2 = is_in_bench[tobe_opt] dummies2 = pd.get_dummies(industry_map.loc[tobe_opt, industry_map.columns[0]]) dum2 = dummies2.T.values # 小坑 new_ind = ind_wei_su[dummies2.columns] new_ind = new_ind / new_ind.sum() ind_wei2 = new_ind.values # 对个股权重优化的坑,开始时是行业权重乘以0.75,但在二次优化的时候,可能有的行情的权重不够用了。 max_wei2 = 3 * industry_map.loc[tobe_opt, 'max_wei'].values total_wei = 1 if pre_w: pre_w = pre_w[tobe_opt] P2 = lamda * e_tmp2.values # 有些行业个股权重以前的不够了 x = cp.Variable(len(ret_tmp2), nonneg=True) y = cp.Variable(len(ret_tmp2), boolean=True) para_dict2 = { 'x': x, 'y': y, 'y_sum': max_num, # - n2, 'max_wei': max_wei2, # max_wei2.max() max_wei2.sum() 'in_benchmark_wei': in_benchmark_wei, 'is_in_bench': is_in_bench2, 'ret_e': ret_tmp2, 'dum': dum2, 'wei_tmp': wei_tmp, 'ind_wei': ind_wei2, # ind_wei2.sum() 'risk_factor_dict': risk_factor_dict, 'limit_factor_df': limit_factor_df, 'pre_w': pre_w, 'P': P, 'total_wei': total_wei } con_dict2 = { 'in_benchmark': in_benchmark, 'industry_max_expose': industry_max_expose, 'turnover': turnover, 'te': te, } q2 = ret_tmp2.values # P2.shape # q2.shape # ind_wei2.sum() # max_wei2.sum() cons = generates_constraints(para_dict2, con_dict2) prob = cp.Problem(cp.Maximize(q2.T * x - cp.quad_form(x, P2)), cons) prob.solve(solver=cp.ECOS_BB, feastol=1e-10) print(prob.status) if prob.status != 'optimal': input('input:二次股票数量优化时,未得出最优解,请检查') # winsound.Beep(600, 2000) # print(x.value) # print(y.value) # np.sum(x.value > 0.001) # np.sum(x.value) # np.sum(y.value) # np.sum(x.value[y.value == 1]) # # prob = cp.Problem(cp.Maximize(q.T * x - cp.quad_form(x, P)), # - cp.quad_form(x, P)), # constraints) # print(prob.is_dcp()) # prob.solve() # print(prob.status) # print(x.value) # # np.sum(x.value > 0.01) # # # np.vstack((a, b)) # 在垂直方向上拼接 # # np.hstack((a, b)) # 在水平方向上拼接 # industry_max_expose = 0.05 # # if max_num: # ''' # 优化目标函数: # ECOS is a numerical software for solving convex second-order cone programs (SOCPs) of type # min c'*x # s.t. 
A * x = b # G * x <= _K h # 步骤: # 1,假设股票数量没有约束,求解组合优化,得到绝对权重向量 # 2,对股票数量不过N_max的原始可行域进行限制,选股空间为w1中有权重(>1e-6),数量为n1 # 强制保留w1中各行业最大权重股以及其他权重靠前的股票,数量为n2,n2<N_max # 3,在第2步限制后的可行域内运用BB算法求解最优权重,设置最大迭代刺猬niters,超过 # 迭代次数返回截至目前的最优解。 # ''' # # 步骤1 # sol = solvers.qp(P, q, G, h, A, b) # wei = sol['x'] # print(wei) wei.size # wei_ar = np.array(wei).flatten() # wei_ar.size # n1 = np.sum(wei_ar > 0) # # np.sum(wei_ar[wei_ar > 0.01]) # wei_se = pd.Series(wei_ar, index=ret_tmp.index) # # 步骤2 # wei_selected, n2 = select_import_wei(wei_se) # # 步骤3 # wei_selected, n2 # # x = cp.Variable(len(ret_tmp), nonneg=True) # y = cp.Variable(len(ret_tmp), boolean=True) # prob = cp.Problem(cp.Maximize(q.T * x - cp.quad_form(x, P)), # - cp.quad_form(x, P)), # [ # G @ x <= h, # print(P) G.size h.size # x - y <= 0, # A @ x == b, # cp.sum(x) == 1, # cp.sum(y) <= 200, # ]) # print(prob.is_dcp()) # # max_iters: maximum number of iterations # # reltol: relative accuracy(default: 1e-8) # # feastol: tolerance for feasibility conditions (default: 1e-8) # # reltol_inacc: relative accuracy for inaccurate solution (default: 5e-5) # # feastol_inacc: tolerance for feasibility condition for inaccurate solution(default:1e-4) # # prob.solve(solver=cp.ECOS_BB, max_iters=20000, feastol=1e-4, reltol=1e-4, reltol_inacc=1e-4, # feastol_inacc=1e-4) # # prob.solve(solver=cp.ECOS_BB, max_iters=20, feastol=1e-5, reltol=1e-5, feastol_inacc=1e-1) # print(prob.status) # print(x.value) # print(y.value) # # max_num # # # pass # # # print(cvxpy.installed_solvers()) # # # # np.sum(A, axis=1) # # A.size # # np.linalg.matrix_rank(A) # # # # sol = solvers.qp(P, q, G, h, A, b) # # wei = sol['x'] # print(wei) wei.size # # # # np.rank(A) # # print(A) # # print(q.T * wei) # # # # # np.sum(wei) # # wei_ar = np.array(wei).flatten() # wei_ar.size # # # np.sum(wei_ar > 0.01) # # # np.sum(wei_ar[wei_ar > 0.01]) return wei_se
import numpy as np
import cvxpy as cp

x = np.asarray([[0, 0, 0, 0, 0],
                [10, 20, 30, 40, 50]])
y = np.asarray([[-10, -20, -30, -40, -50],
                [0, 0, 0, 0, 0]])

lamda = cp.Variable(5)
mu = cp.Variable(5)

# running sums for the convex combinations of the columns of x and y
t_sum_x = 0
t_sum_y = 0

constraints = [cp.sum(lamda) == 0.5, cp.sum(mu) == 0.5, lamda >= 0, mu >= 0]

x = x.transpose()
y = y.transpose()
for i in range(5):
    t_sum_x = t_sum_x + (lamda[i] * x[i])
    t_sum_y = t_sum_y + (mu[i] * y[i])

obj = cp.Maximize(-cp.norm(t_sum_x - t_sum_y))
prob = cp.Problem(obj, constraints)
prob.solve()

v_x = np.zeros(2)
v_y = np.zeros(2)
for i in range(5):
    v_x = v_x + (lamda.value[i] * x[i])
    v_y = v_y + (mu.value[i] * y[i])

print("status:", prob.status)
print("optimal lambda", lamda.value)
print("optimal mu", mu.value)
print("max val", -1 * np.linalg.norm(v_x - v_y))
print("Hull point 1:", 2 * v_x)
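# Follow-up sketch (not in the original snippet): since lamda and mu each sum
# to 0.5, v_x and v_y are half of the closest points on the two convex hulls,
# so the second hull point and the hull-to-hull distance follow directly.
print("Hull point 2:", 2 * v_y)
print("Distance between the hulls:", 2 * np.linalg.norm(v_x - v_y))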
def iter_dccp(self, max_iter, tau, mu, tau_max, solver, **kwargs): """ ccp iterations :param max_iter: maximum number of iterations in ccp :param tau: initial weight on slack variables :param mu: increment of weight on slack variables :param tau_max: maximum weight on slack variables :param solver: specify the solver for the transformed problem :return value of the objective function, maximum value of slack variables, value of variables """ # split non-affine equality constraints constr = [] for arg in self.constraints: if str(type(arg)) == "<class 'cvxpy.constraints.zero.Zero'>" and not arg.is_dcp(): constr.append(arg.expr.args[0]<=arg.expr.args[1]) constr.append(arg.expr.args[1]<=arg.expr.args[0]) else: constr.append(arg) obj = self.objective self = cvx.Problem(obj, constr) it = 1 converge = False # keep the values from the previous iteration or initialization previous_cost = float("inf") previous_org_cost = self.objective.value variable_pres_value = [] for var in self.variables(): variable_pres_value.append(var.value) # each non-dcp constraint needs a slack variable var_slack = [] for constr in self.constraints: if not constr.is_dcp(): var_slack.append(cvx.Variable(constr.size)) while it<=max_iter and all(var.value is not None for var in self.variables()): constr_new = [] # objective temp = convexify_obj(self.objective) if not self.objective.is_dcp(): # non-sub/super-diff while temp is None: # damping var_index = 0 for var in self.variables(): #var_index = self.variables().index(var) var.value = 0.8*var.value + 0.2* variable_pres_value[var_index] var_index += 1 temp = convexify_obj(self.objective) # domain constraints for dom in self.objective.args[0].domain: constr_new.append(dom) # new cost function cost_new = temp.args[0] # constraints count_slack = 0 for arg in self.constraints: temp = convexify_constr(arg) if not arg.is_dcp(): while temp is None: # damping for var in self.variables: var_index = self.variables().index(var) var.value = 0.8*var.value + 0.2* variable_pres_value[var_index] temp = convexify_constr(arg) newcon = temp[0] # new constraint without slack variable for dom in temp[1]:# domain constr_new.append(dom) right = newcon.expr.args[1] + var_slack[count_slack] constr_new.append(newcon.expr.args[0]<=right) constr_new.append(var_slack[count_slack]>=0) count_slack = count_slack+1 else: constr_new.append(temp) # objective if self.objective.NAME == 'minimize': for var in var_slack: cost_new += tau*cvx.sum(var) obj_new = cvx.Minimize(cost_new) else: for var in var_slack: cost_new -= tau*cvx.sum(var) obj_new = cvx.Maximize(cost_new) # new problem prob_new = cvx.Problem(obj_new, constr_new) # keep previous value of variables variable_pres_value = [] for var in self.variables(): variable_pres_value.append(var.value) # solve if solver is None: logger.info("iteration=%d, cost value=%.5f, tau=%.5f", it, prob_new.solve(**kwargs), tau) else: logger.info("iteration=%d, cost value=%.5f, tau=%.5f", it, prob_new.solve(solver=solver, **kwargs), tau) max_slack = None # print slack if (prob_new._status == "optimal" or prob_new._status == "optimal_inaccurate") and not var_slack == []: slack_values = [v.value for v in var_slack if v.value is not None] max_slack = max([np.max(v) for v in slack_values] + [-np.inf]) logger.info("max slack = %.5f", max_slack) #terminate if np.abs(previous_cost - prob_new.value) <= 1e-3 and np.abs(self.objective.value - previous_org_cost) <= 1e-3: it_real = it it = max_iter+1 converge = True else: previous_cost = prob_new.value previous_org_cost = 
self.objective.value it_real = it tau = min([tau*mu,tau_max]) it += 1 # return if converge: self._status = "Converged" else: self._status = "Not_converged" var_value = [] for var in self.variables(): var_value.append(var.value) if not var_slack == []: return(self.objective.value, max_slack, var_value) else: return(self.objective.value, var_value)
        A[S[ac[0]]][idx] += 1
    else:
        for nxt, prob in Q[ac[0]][ac[1] - 1].items():
            A[S[ac[0]]][idx] += prob
            A[S[nxt]][idx] -= prob

A = np.reshape(A, (60, cols))

rewards = [0 if ac[1] == 0 else -20 for ac in actions]
rewards = np.reshape(rewards, (1, cols))

alpha = [0] * 60
alpha[59] = 1
alpha = np.reshape(alpha, (60, 1))

X = cp.Variable((cols, 1))
constraints = [(A @ X) == alpha, X >= 0]
obj = cp.Maximize(rewards * X)
prob = cp.Problem(obj, constraints)
prob.solve()

print("status:", prob.status)
print("Optimal value:", prob.value)
print("Optimal X:", X.value)

for j in S:
    S[j] = -10

X = np.reshape(X.value, (cols, ))
for val, ac in zip(X, actions):
    if S[ac[0]] < val:
        S[ac[0]] = val
        if ac[1] == 0:
            Q[ac[0]] = 'NOOP'
        elif ac[1] == 1:
            Q[ac[0]] = 'SHOOT'
import numpy as np
import cvxpy as cp

v = np.array([1, 1, 1, 1])
# edge weight from {a,b,c,d} to {e,f,g,h}
c = np.array([[1, 0, 3, 0],
              [0, 2, 0, 4],
              [0, 0, 0, 5],
              [0, 1, 2, 0]])

# boolean matrix to check whether the edge will result in maximum bipartite graph or not
x = cp.Variable((4, 4), boolean=True)
y = [0, 0, 0, 0]

constraints = []
for i in range(0, 4):
    sum1 = 0
    sum2 = 0
    for j in range(0, 4):
        sum1 += x[i][j]
        sum2 += x[j][i]
    # the number of edges between any two vertices should be <= 1. Therefore these
    # constraints ensure that no row or column should sum up to more than one.
    constraints += [sum1 <= v[i]]
    constraints += [sum2 <= v[i]]

# maximizing the set of edges
prob = cp.Problem(cp.Maximize(cp.sum(cp.multiply(c, x))), constraints)
prob.solve()
print("The maximum weight is", prob.value)
print("The edge matrix is", x.value)
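# Follow-up sketch (not in the original snippet): recover the chosen edges
# from the boolean assignment matrix after solving.
if x.value is not None:
    rows, cols = np.nonzero(np.round(x.value))
    for i, j in zip(rows, cols):
        print("edge {} -> {} with weight {}".format(i, j, c[i, j]))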
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import numpy as np
import cvxpy as cp

# problem data
v1 = [2, 3, 3]
v2 = [4, 5, 10]
v = np.array(v1 + v2)
n = len(v)

# construct and solve the problem
x = cp.Variable(n)
objective = cp.Maximize(v @ x)
constraints = [
    0 <= x, x <= 1,  # continuous relaxation of x \in {0, 1}
    cp.sum(x[0:3]) <= 1,  # at-most-one constraint for agent 1
    cp.sum(x[3::]) <= 1,
    cp.sum(x[0] + x[2]) + cp.sum(x[3] + x[5]) <= 1,  # pairwise-disjoint
    cp.sum(x[1] + x[2]) + cp.sum(x[4] + x[5]) <= 1
]
prob = cp.Problem(objective, constraints)
result = prob.solve(verbose=True)

# The optimal value for x is stored in `x.value` which is assigned when
# prob.solve() is executed.
print(f"Optimal value for the problem is: {np.round(result, 4)}.")
print(f"Optimal solution for the problem is: {np.round(x.value, 4)}.")

# The optimal Lagrange multiplier for a constraint is stored in
# `constraint.dual_value`.
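# Follow-up sketch: reading a dual value after the solve above, e.g. for the
# x <= 1 box constraint (the second entry of `constraints`).
print(f"Dual of x <= 1: {np.round(constraints[1].dual_value, 4)}")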
import math

import numpy as np
import cvxpy as cp

# outward normals of the four half-spaces (the rows of A below)
a1 = np.array([3, 1])
a2 = np.array([2, -2])
a3 = np.array([-1, 2])
a4 = np.array([-2, -2])

A = np.array([[3, 1],
              [2, -2],
              [-1, 2],
              [-2, -2]])
A.shape
b = np.ones(4)

r = cp.Variable(1)
xc = cp.Variable(2)
objective = cp.Maximize(r)
constraints = [a1 @ xc + r * math.sqrt(a1 @ a1) <= b[0],
               a2 @ xc + r * math.sqrt(a2 @ a2) <= b[1],
               a3 @ xc + r * math.sqrt(a3 @ a3) <= b[2],
               a4 @ xc + r * math.sqrt(a4 @ a4) <= b[3]]
prob = cp.Problem(objective, constraints)
result = prob.solve()
print(r.value)
print(xc.value)

# Plotting
radius = r.value
center = np.array(xc.value)
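# Plotting sketch (matplotlib assumed): draw the inscribed circle and its
# center; the polygon edges themselves are not reconstructed here.
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 200)
plt.plot(center[0] + radius * np.cos(theta),
         center[1] + radius * np.sin(theta))
plt.plot(center[0], center[1], marker="o")
plt.axis("equal")
plt.show()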
log_sum+=hazard(t_i,0,alpha_ji) expr += CVX.log(log_sum) """ # calculate bad infection for parent and child relation """ T = time_period alpha_ji = Ai[target_node] expr += bad_infection * logSurvival(T,0,alpha_ji) """ #print('expr: {}'.format(expr)) #time.sleep(2) tempA = np.zeros(num_nodes, dtype=float) try: prob = CVX.Problem(CVX.Maximize(expr), constraints) #res = prob.solve(verbose=True,max_iters=500) res = prob.solve(verbose=True, solver=CVX.CVXOPT) #if prob.status in [CVX.OPTIMAL, CVX.OPTIMAL_INACCURATE]: tempA = np.asarray(Ai.value).squeeze().tolist() #A[:,convexNodes[target_node]] = tempA #print(len(tempA)) #Aone = {} #for x in range(len(tempA)): # Aone[convexNodesArr[x]] = tempA[x] #print(A) #else: # A[:, target_node] = -1 #print('result: {}'.format(res)) except BaseException as e: print(e)
def NOA(h11, h12, h21, h22, v, QQ1, QQ2, r1, r2, N, phii1, phii2, pma, eta, YY1, YY2): """ non-orthogonal """ cpln2 = 0.6931471805599453 # ln2 = 0.6931471805599453 epsilon = 0.0001 # iteration breaking threshold itera_max = 100 amp = 1 N *= amp k = (2**r1 - 1) * (2**r2 - 1) * h12 * h21 / (h11 * h22) pnot1 = N * ( (2**r1 - 1) * h21 / h11 + k) / (h21 * (1 - k)) # minimum power required pnot2 = N * ((2**r2 - 1) * h12 / h22 + k) / (h12 * (1 - k)) p_max = np.array([[pma], [pma]]) # 2 × 1 p_min = np.array([[pmin], [pmin]]) # 2 × 1 # p_min = np.array([[0], [0]]) if k < 1 and pnot1 <= pma and pnot2 <= pma: I = 0 # iteration num objNOA = -10000 pk1 = pma pk2 = pma while True: I += 1 p = cp.Variable(shape=(2, 1), nonneg=True) # 2 × 1, np.dot(A, p) - B # if use "f = (...) / cp.log(2)", will raise error "Problem does not follow DCP rules." f = (v * alpha1 + 2 * u1 * QQ1) * cp.log(N + h11 * p[0][0] * amp + h12 * p[1][0] * amp) / cpln2 \ + (v * alpha2 + 2 * u2 * QQ2) * cp.log(N + h22 * p[1][0] * amp + h21 * p[0][0] * amp) / cpln2 \ - (v * eta * beta1) * p[0][0] - (v * eta * beta2) * p[1][0] \ - 2 * (u1 * QQ1 * r1 - phii1 * v1 * YY1 + u2 * QQ2 * r2 - phii2 * v2 * YY2) y1 = N + h12 * pk2 * amp y2 = N + h21 * pk1 * amp g = (v * alpha1 + 2 * u1 * QQ1) * cp.log(y1) / cpln2 + ( v * alpha2 + 2 * u2 * QQ2) * cp.log(y2) / cpln2 vectorP = np.array([p[0][0] - pk1, p[1][0] - pk2]) deltaG = np.array([ (v * alpha2 + 2 * u2 * QQ2) * h21 / ((N + h21 * pk1) * cpln2), (v * alpha1 + 2 * u1 * QQ1) * h12 / ((N + h12 * pk2) * cpln2) ]) A = np.array([[-1, h12 * (2**r1 - 1) / h11], [h21 * (2**r2 - 1) / h22, -1]]) # 2 × 2 B = np.array([[-(2**r1 - 1) * N / h11], [-(2**r2 - 1) * N / h22]]) # 2 × 1 objfunc = cp.Maximize(f - g - deltaG[0] * vectorP[0] - deltaG[1] * vectorP[1]) constr = [p_min <= p, p <= p_max, (A * p - B) <= 0] prob = cp.Problem(objfunc, constr) # print(prob) prob.solve(solver=cp.SCS) if prob.status == 'optimal' or prob.status == 'optimal_inaccurate': pp1 = max(p.value[0][0], pmin) pp2 = max(p.value[1][0], pmin) pp1 = min(p.value[0][0], pma) pp2 = min(p.value[1][0], pma) k1 = h11 * pp1 / (N + h12 * pp2) # SINR k2 = h22 * pp2 / (N + h21 * pp1) if k1 >= 0 and k2 >= 0: Rk1 = np.log2(1 + k1) # channel capacity, bps Rk2 = np.log2(1 + k2) else: pp1 = 0 pp2 = 0 objNOA = prob.value Rk1 = 0 Rk2 = 0 if abs(objNOA - objfunc.value) <= epsilon: objNOA = objfunc.value break elif I >= itera_max: objNOA = objfunc.value break else: pk1 = pp1 pk2 = pp2 objNOA = objfunc.value else: pp1 = 0 pp2 = 0 objNOA = -10000 Rk1 = 0 Rk2 = 0 break else: pp1 = 0 pp2 = 0 objNOA = -10000 Rk1 = 0 Rk2 = 0 return objNOA, pp1, pp2, Rk1, Rk2
def optimization_problem(data_frame):
    """Defines the optimization problem, and solves it for the maximum revenue
    along with saving the relevant result as a dataframe and a plot."""
    # LMP Prices
    prices = data_frame["LMP_kWh"]

    # Initialize Variables for optimization Problem
    rate = cp.Variable((len(data_frame), 1))
    E = cp.Variable((len(data_frame), 1))

    # Create max, min for the 3 optimization variables
    discharge_max = 5
    charge_max = -5
    SOC_max = 0.9 * 14
    SOC_min = 0.1 * 14

    # Initialize constraints and revenue
    constraints = []
    revenue = 0

    print("Starting Constraint Creation")
    # Create constraints for each time step along with revenue.
    for i in range(len(data_frame)):
        if i % 1000 == 0:
            print(i)
        constraints += [
            rate[i] <= discharge_max,  # Rate should be lower than or equal to max rate
            rate[i] >= charge_max,
            E[i] <= SOC_max,  # Overall kW should be within the range of [SOC_min, SOC_max]
            E[i] >= SOC_min
        ]
        # Revenue = sum of (prices ($/kWh) * (energy sold (kW) * 1hr - energy bought (kW) * 1hr)) at timestep t
        revenue += prices[i] * (rate[i])

    for i in range(1, len(data_frame)):
        if i % 1000 == 0:
            print(i)
        constraints += [E[i] == E[i - 1] + rate[i - 1]]  # Current SOC constraint

    # create first time step constraints
    constraints += [E[0] == random.uniform(SOC_min, SOC_max), rate[0] == 0]

    print("Solving problem")
    # Create Problem and solve to find Optimal Revenue and Times to sell.
    prob = cp.Problem(cp.Maximize(revenue), constraints)
    prob.solve(solver=cp.ECOS, verbose=True)
    print("Optimal Maximum Revenue is {0}".format(prob.value))

    # Convert values for the variables into arrays
    E_val = [E.value[i][0] for i in range(len(data_frame))]
    charge_val = [rate.value[i][0] for i in range(len(data_frame))]

    # Join values to the data frame
    data_frame["E"] = E_val
    data_frame["Charge"] = charge_val
    data_frame["DATETIME"] = data_frame.index

    revenue = [0]
    for i in range(1, len(data_frame)):
        revenue.append(revenue[-1] + prices[i] * charge_val[i])
    data_frame["Cumulative Additive Revenue"] = revenue

    print("Saving DF")
    # Save dataframe
    data_frame.to_csv("df_LMP.csv")

    print("Plotting 2 day timeline")
    # Plot dataframe for 2 days
    f = plt.figure(figsize=(20, 20))
    data_frame.iloc[:48].plot(x="DATETIME", y=["E", "Charge"])
    plt.xlabel("DateTime")
    plt.ylabel("Power- kW")
    plt.show()
    plt.savefig('2_day_battery_energy_arbitrage.png')

    return data_frame
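# Hypothetical usage sketch for the function above. The CSV path is made up
# for illustration; the "LMP_kWh" price column and a datetime index are
# assumptions taken from the function body, not from the original source.
import pandas as pd

lmp_df = pd.read_csv("lmp_prices.csv", parse_dates=["DATETIME"], index_col="DATETIME")
result_df = optimization_problem(lmp_df)
print(result_df[["E", "Charge", "Cumulative Additive Revenue"]].head())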
#!/usr/bin/env python3
import cvxpy as cp
import numpy as np

# Problem data.
# Glove eqn 18/8
m = 30
n = 20
np.random.seed(1)
X = np.random.randn(n, n)
fX = np.random.randn(n, n)
B = np.random.randn(n, n)

# Construct the problem.
W = cp.Variable((n, n))
# Note: cp.prod takes a single expression (its second argument is an axis), so
# the objective below is only a sketch of the GloVe loss, which is normally a
# weighted sum of squared residuals; as written this model is not DCP.
objective = cp.Maximize(cp.prod(fX, cp.sum(W @ W.T + B - np.log(X))))
# objective = cp.Minimize(cp.sum_squares(A @ x - b))
# constraints = [0 <= x, x <= 1]
constraints = [W >= 0]
prob = cp.Problem(objective, constraints)

# The optimal objective value is returned by `prob.solve()`.
result = prob.solve()
# The optimal value for W is stored in `W.value`.
print(W.value)
# The optimal Lagrange multiplier for a constraint is stored in
# `constraint.dual_value`.
print(constraints[0].dual_value)
def test_CBC_hard(self): num_states = 5 x = cvxpy.Bool(num_states,name='x') sw_on = cvxpy.Bool(num_states,name='sw_on') sw_off = cvxpy.Bool(num_states,name='sw_off') sw_stay_on = cvxpy.Bool(num_states,name='sw_stay_on') sw_stay_off = cvxpy.Bool(num_states,name='sw_stay_off') fl = cvxpy.Variable(num_states,name='float') constr = [] # can only be one transition type constr.append(sw_on*1.0 + sw_off*1.0 + sw_stay_on*1.0 + sw_stay_off*1.0 == 1) for i in range(num_states): # if switching on, must be now on constr.append(x[i] >= sw_on[i]) # if switchin on, must have been off previously if i>0: constr.append((1-x[i-1]) >= sw_on[i]) # if switching off, must be now off constr.append((1-x[i]) >= sw_off[i]) # if switchin off, must have been on previously if i>0: constr.append(x[i-1] >= sw_off[i]) # if staying on, must be now on constr.append(x[i] >= sw_stay_on[i]) # if staying on, must have been on previously if i>0: constr.append(x[i-1] >= sw_stay_on[i]) # if staying, must be now off constr.append((1-x[i]) >= sw_stay_off[i]) # if staying off, must have been off previously if i>0: constr.append((1-x[i-1]) >= sw_stay_off[i]) # random stuff constr.append(x[1] == 1) constr.append(x[3] == 0) for i in range(num_states): constr.append(fl[i] <= i*sw_on[i]) constr.append(fl[i] >= -i) obj = cvxpy.Maximize(sum(sw_off) + sum(sw_on)) for i in range(num_states): if i%2 == 0: obj += cvxpy.Maximize(fl[i]) else: obj += cvxpy.Maximize(-1*fl[i]) problem = cvxpy.Problem(obj, constr) ret = problem.solve(solver=cvxpy.CBC) self.assertTrue(problem.status in [cvxpy.OPTIMAL, cvxpy.OPTIMAL_INACCURATE]) print(' | '.join(['i','x','sw_on','sw_off','stay_on','stay_off','float'])) for i in range(num_states): row = ' | '.join([ '%1d' % i, '%1d' % int(round(x[i].value)), '%5d' % int(round(sw_on[i].value)), '%6d' % int(round(sw_off[i].value)), '%7d' % int(round(sw_stay_on[i].value)), '%8d' % int(round(sw_stay_off[i].value)), '%f' % fl[i].value ]) print(row)
def main(mat): amat = mat['A'] bmat = mat['B'] print('Input shape:', amat.shape) n = amat.shape[0] qap_func = gen_qap_func(amat, bmat) qap_fhats = {(n,): np.array([1])} variables = {(n,): cp.Variable((1, 1))} #qap_irreps = [(n - 1, 1), (n - 2, 2), (n - 2, 1, 1)] qap_irreps = [(n - 1, 1), (n - 2, 1, 1)] for irrep in [(n - 1, 1), (n - 2, 2), (n - 2, 1, 1)]: qhat = qap_fhat(amat, bmat, irrep) qap_fhats[irrep] = qhat variables[irrep] = make_variable(qhat.shape) c_lambdas = {} c_lambdas_inv = {} for irrep in [(n - 1, 1), (n - 2, 1, 1)]: c = c_lambda(irrep) c_lambdas_inv[irrep] = np.linalg.inv(c) c_lambdas[irrep] = c print(irrep, np.linalg.norm(c), np.allclose([email protected], np.eye(len(c)))) pdb.set_trace() # These need to be functions of variables block_diags = {} #for irrep in qap_irreps: #for irrep in [(n - 1, 1), (n - 2, 2), (n - 2, 1, 1)]: for irrep in [(n - 1, 1), (n - 2, 1, 1)]: birreps, _ = qap_decompose(irrep) print('irrep: {} | decompose: {}'.format(irrep, birreps)) block_diags[irrep] = gen_block_diag_vars(variables, birreps) print('done irrep: {} | decompose: {}'.format(irrep, birreps)) # constraints are functions of the block_diags d_n = hook_length((n,)) / math.factorial(n) d_n1 = hook_length((n - 1, 1)) / math.factorial(n) d_n22 = hook_length((n - 2, 2)) / math.factorial(n) d_n211 = hook_length((n - 2, 1, 1)) / math.factorial(n) obj = \ d_n * cp.sum(cp.multiply(variables[(n,)], qap_fhats[(n,)])) + \ d_n1 * cp.sum(cp.multiply(variables[(n - 1, 1)], qap_fhats[(n - 1, 1)])) + \ d_n22 * cp.sum(cp.multiply(variables[(n - 2, 2)], qap_fhats[(n - 2, 2)])) + \ d_n211 * cp.sum(cp.multiply(variables[(n - 2, 1, 1)], qap_fhats[(n - 2, 1, 1)])) n1_one = np.ones((block_diags[(n-1, 1)].shape[0], 1)) n2_one = np.ones((block_diags[(n-2, 1, 1)].shape[0], 1)) constraints = [ #c_lambdas_inv[(n - 1, 1)] @ block_diags[(n - 1, 1)] @ c_lambdas[(n - 1, 1)] >= 0, #(c_lambdas_inv[(n - 1, 1)] @ block_diags[(n - 1, 1)] @ c_lambdas[(n - 1, 1)]) @ n1_one == 1, #(c_lambdas_inv[(n - 1, 1)] @ block_diags[(n - 1, 1)] @ c_lambdas[(n - 1, 1)]).T @ n1_one == 1, # c_lambdas_inv[(n - 2, 2)] @ block_diags[(n - 2, 2)] @ c_lambdas[(n - 2, 2)] >= 0, c_lambdas_inv[(n - 2, 1, 1)] @ block_diags[(n - 2, 1, 1)] @ c_lambdas[(n - 2, 1, 1)] >= 0, #c_lambdas_inv[(n - 2, 1, 1)] @ block_diags[(n - 2, 1, 1)] @ c_lambdas[(n - 2, 1, 1)] @ n2_one == 1, #(c_lambdas_inv[(n - 2, 1, 1)] @ block_diags[(n - 2, 1, 1)] @ c_lambdas[(n - 2, 1, 1)]).T @ n2_one == 1, variables[(n,)] == 1 ] print(len(constraints)) problem = cp.Problem(cp.Maximize(obj), constraints) #problem.solve(solver=cp.OSQP) result =problem.solve(solver=cp.OSQP, verbose=True) #print(f'Obj: {problem.value:.2f}') perm = c_lambdas_inv[(n-1, 1)] @ block_diags[(n - 1, 1)].value @c_lambdas[(n-1, 1)] perm_tup = c_lambdas_inv[(n-2, 1, 1)] @ block_diags[(n - 2, 1, 1)].value @c_lambdas[(n-2, 1, 1)] res = ((perm @ amat @ perm.T)@ bmat).sum() print('Perm tup sums:', perm_tup.sum(axis=1), perm_tup.sum(axis=0), perm_tup.sum(), perm_tup.shape) print(f'Lower bound: {res} | result: {result}') pdb.set_trace()
def dccp_transform(self): """ problem transformation return: prob_new: a new dcp problem parameters: parameters in the constraints flag: indicate if each constraint is transformed parameters_cost: parameters in the cost function flag_cost: indicate if the cost function is transformed var_slack: a list of slack variables """ # split non-affine equality constraints constr = [] for arg in self.constraints: if str(type(arg)) == "<class 'cvxpy.constraints.zero.Zero'>" and not arg.is_dcp(): constr.append(arg[0]<=arg[1]) constr.append(arg[1]<=arg[0]) else: constr.append(arg) self.constraints = constr constr_new = [] # new constraints parameters = [] flag = [] parameters_cost = [] flag_cost = [] # constraints var_slack = [] # slack for constr in self.constraints: if not constr.is_dcp(): flag.append(1) var_slack.append(cvx.Variable(constr.size[0], constr.size[1])) temp = convexify_para_constr(constr) newcon = temp[0] # new constraint without slack variable right = newcon.args[1] + var_slack[-1] # add slack variable on the right side constr_new.append(newcon.args[0]<=right) # new constraint with slack variable constr_new.append(var_slack[-1]>=0) # add constraint on the slack variable parameters.append(temp[1]) for dom in temp[2]: # domain constr_new.append(dom) else: flag.append(0) constr_new.append(constr) # cost functions if not self.objective.is_dcp(): flag_cost.append(1) temp = convexify_para_obj(self.objective) cost_new = temp[0] # new cost function parameters_cost.append(temp[1]) parameters_cost.append(temp[2]) for dom in temp[3]: # domain constraints constr_new.append(dom) else: flag_cost.append(0) cost_new = self.objective.args[0] # objective tau = cvx.Parameter() parameters.append(tau) if self.objective.NAME == 'minimize': for var in var_slack: cost_new += tau*cvx.sum(var) obj_new = cvx.Minimize(cost_new) else: for var in var_slack: cost_new -= tau*cvx.sum(var) obj_new = cvx.Maximize(cost_new) # new problem prob_new = cvx.Problem(obj_new, constr_new) return prob_new, parameters, flag, parameters_cost, flag_cost, var_slack
import numpy as np

np.random.seed(1)
n = 10
mu = np.abs(np.random.randn(n, 1))
Sigma = np.random.randn(n, n)
Sigma = Sigma.T.dot(Sigma)

# Long only portfolio optimization.
import cvxpy as cp

w = cp.Variable(n)
gamma = cp.Parameter(nonneg=True)
ret = mu.T*w
risk = cp.quad_form(w, Sigma)
prob = cp.Problem(cp.Maximize(ret - gamma*risk),
                  [cp.sum(w) == 1, w >= 0])

# Compute trade-off curve.
SAMPLES = 10000
risk_data = np.zeros(SAMPLES)
ret_data = np.zeros(SAMPLES)
gamma_vals = np.logspace(-2, 3, num=SAMPLES)
for i in range(SAMPLES):
    gamma.value = gamma_vals[i]
    prob.solve()
    risk_data[i] = cp.sqrt(risk).value
    ret_data[i] = ret.value

# Plot long only trade-off curve.
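# A minimal plotting sketch for the trade-off curve computed above (assumes
# matplotlib is available; labels are my own, not the original's).
import matplotlib.pyplot as plt

plt.plot(risk_data, ret_data)
plt.xlabel("Standard deviation (risk)")
plt.ylabel("Expected return")
plt.title("Long-only portfolio trade-off curve")
plt.show()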
def _init_objective_UB_LUPI(self, sign=None, **kwargs):
    self.add_constraint(
        self.feature_relevance <= sign * self.w_priv[self.lupi_index])
    self._objective = cvx.Maximize(self.feature_relevance)
def relaxed_qclp(adj, alpha, fragile, local_budget, reward, teleport, global_budget=None, upper_bounds=None): """ Solves the linear program associated with the relaxed QCLP. Parameters ---------- adj : sp.spmatrix, shape [n, n] Sparse adjacency matrix. alpha : float (1-alpha) teleport[v] is the probability to teleport to node v. fragile : np.ndarray, shape [?, 2] Fragile edges that are under our control. local_budget : np.ndarray, shape [n] Maximum number of local flips per node. reward : np.ndarray, shape [n] Reward vector. teleport : np.ndarray, shape [n] Teleport vector. global_budget : int Global budget. upper_bounds : np.ndarray, shape [n] Upper bound for the values of x_i. Returns ------- xval : np.ndarray, shape [n+len(fragile)] The value of the decision variables. opt_fragile : np.ndarray, shape [?, 2] Optimal fragile edges. obj_value : float Optimal objective value. """ n = adj.shape[0] n_fragile = len(fragile) n_states = n + n_fragile adj = adj.copy() adj_clean = adj.copy() # turn off all existing edges before starting adj = adj.tolil() adj[fragile[:, 0], fragile[:, 1]] = 0 # add an edge from the source node to the new auxiliary variables source_to_aux = sp.lil_matrix((n, n_fragile)) source_to_aux[fragile[:, 0], np.arange(n_fragile)] = 1 original_nodes = sp.hstack((adj, source_to_aux)) original_nodes = sp.diags(1 / original_nodes.sum(1).A1) @ original_nodes # transitions among the original nodes are discounted by alpha original_nodes[:, :n] *= alpha # add an edge from the auxiliary variables back to the source node aux_to_source = sp.lil_matrix((n_fragile, n)) aux_to_source[np.arange(n_fragile), fragile[:, 0]] = 1 turned_off = sp.hstack((aux_to_source, sp.csr_matrix( (n_fragile, n_fragile)))) # add an edge from the auxiliary variables to the destination node aux_to_dest = sp.lil_matrix((n_fragile, n)) aux_to_dest[np.arange(n_fragile), fragile[:, 1]] = 1 turned_on = sp.hstack((aux_to_dest, sp.csr_matrix((n_fragile, n_fragile)))) # transitions from aux nodes when turned on are discounted by alpha turned_on *= alpha trans = sp.vstack((original_nodes, turned_off, turned_on)).tocsr() states = np.arange(n + n_fragile) states = np.concatenate((states, states[-n_fragile:])) c = np.zeros(len(states)) # reward for the original nodes c[:n] = reward # negative reward if we are going back to the source node c[n:n + n_fragile] = -reward[fragile[:, 0]] one_hot = sp.eye(n_states).tocsr() A = one_hot[states] - trans b = np.zeros(n_states) b[:n] = (1 - alpha) * teleport x = cp.Variable(len(c), nonneg=True) # set up the sums of auxiliary variables for local and global budgets frag_adj = sp.lil_matrix((len(c), len(c))) # the indices of the turned off/on auxiliary nodes idxs_off = n + np.arange(n_fragile) idxs_on = n + n_fragile + np.arange(n_fragile) # if the edge exists in the clean graph use the turned off node, otherwise the turned on node exists = adj_clean[fragile[:, 0], fragile[:, 1]].A1 idx_off_on_exists = np.where(exists, idxs_off, idxs_on) # each source node is matched with the correct auxiliary node (off or on) frag_adj[fragile[:, 0], idx_off_on_exists] = 1 deg = (trans != 0).sum(1).A1 unique = np.unique(fragile[:, 0]) # the local budget constraints are sum_i ( x_i_{on/off} * deg_i ) <= budget * x_i) # we index only on the unique source nodes to avoid trivial constraints budget_constraints = [ cp.multiply((frag_adj @ x)[unique], deg[unique]) <= cp.multiply( local_budget[unique], x[unique]) ] if global_budget is not None and upper_bounds is not None: # if we have a bounds matrix (for any 
PPR vector) we need to compute the upper bounds for the teleport if len(upper_bounds.shape) == 2: upper_bounds = teleport @ upper_bounds # do not consider upper_bounds that are zero nnz_unique = unique[upper_bounds[unique] != 0] # the global constraint is sum_i ( x_i_{on/off} * deg_i / upper(x_i) ) <= budget ) global_constraint = [ (frag_adj @ x)[nnz_unique] @ (deg[nnz_unique] / upper_bounds[nnz_unique]) <= global_budget ] else: if global_budget is not None or upper_bounds is not None: warnings.warn( 'Either global_budget or upper_bounds is provided, but not both. ' 'Solving using only local budget.') global_constraint = [] prob = cp.Problem(objective=cp.Maximize(c * x), constraints=[x * A == b] + budget_constraints + global_constraint) prob.solve(solver='GUROBI', verbose=False) assert prob.status == 'optimal' xval = x.value # reshape the decision variables such that x_ij^0 and x_ij^1 are in the same row opt_fragile_on_off = xval[n:].reshape(2, -1).T.argmax(1) opt_fragile = fragile[opt_fragile_on_off != exists] obj_value = prob.value return xval, opt_fragile, obj_value, prob
def find_allocation_for_graph(self, consumption_graph: ConsumptionGraph): """ this function get a consumption graph and use cvxpy to solve the convex problem to find a proportional allocation. the condition for the convex problem is: 1) each alloc[i][j] >=0 - an agent cant get minus pesent from some item 2) if consumption_graph[i][j] == 0 so alloc[i][j]= 0 . if in the current consumption graph the agent i doesnt consume the item j so in the allocation he is get 0% from this item 3) the proportional condition (by definition) 4) the sum of every column in the allocation == 1 each item divided exactly to 100 percent and after solving the problem - check if the result are better from the "min_sharing_allocation" (meaning if the current allocation as lass shering from "min_sharing_allocation") and update it :param consumption_graph: some given consumption graph :return: update "min_sharing_allocation" # the test are according to the result of ver 1 in GraphCheck >>> v = [[1, 2, 3,4], [4, 5, 6,5], [7, 8, 9,6]] >>> fpap =FairProportionalAllocationProblem(v) >>> g1 = [[0.0, 0.0, 0.0, 1], [1, 1, 1, 1], [0.0, 0.0, 0.0, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) None >>> g1 = [[0.0, 0.0, 0.0, 1], [1, 1, 1, 1], [1, 0.0, 0.0, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) None >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 1, 1, 1], [1, 0.0, 0.0, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) None >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 1, 1, 1], [1, 1, 0.0, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) [[0. 0. 0. 0.884] [0. 0.464 1. 0.049] [1. 0.535 0. 0.065]] >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 0.0, 1, 1], [1, 1, 0.0, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) [[0. 0. 0. 0.842] [0. 0. 0.999 0.147] [1. 1. 0. 0.01 ]] >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 0.0, 1, 1], [1, 1, 1, 1]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) [[0. 0. 0. 0.836] [0. 0. 0.994 0.149] [1. 1. 0.005 0.013]] >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 1, 1, 1], [1, 0.0, 0.0, 0.0]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) None >>> g1 = [[0.0, 0.0, 0.0, 1], [0.0, 1, 1, 1], [1, 1, 0.0, 0.0]] >>> g = ConsumptionGraph(g1) >>> print(fpap.find_allocation_for_graph(g)) [[0. 0. 0. 0.856] [0. 0.471 1. 0.143] [1. 0.528 0. 0. ]] """ if (consumption_graph.get_num_of_sharing() == self.graph_generator.num_of_sharing_is_allowed): mat = cvxpy.Variable((self.num_of_agents, self.num_of_items)) constraints = [] # every var >=0 and if there is no edge the var is zero # and proportional condition for i in range(self.num_of_agents): count = 0 for j in range(self.num_of_items): if (consumption_graph.get_graph()[i][j] == 0): constraints.append(mat[i][j] == 0) else: constraints.append(mat[i][j] >= 0) count += mat[i][j] * self.valuation[i][j] constraints.append( count >= sum(self.valuation[i]) / len(self.valuation)) # the sum of each column is 1 (the property on each object is 100%) for i in range(self.num_of_items): constraints.append(sum(mat[:, i]) == 1) objective = cvxpy.Maximize(1) prob = cvxpy.Problem(objective, constraints) try: prob.solve(solver="OSQP") except cvxpy.SolverError: prob.solve(solver="SCS") if prob.status == 'optimal': # prob.solve(solver="SCS") # Returns the optimal value. 
prob.solve(solver="SCS") # if not (prob.status == 'infeasible'): alloc = Allocation(mat.value) alloc.round() self.min_sharing_number = alloc.num_of_shering() self.min_sharing_allocation = alloc.get_allocation() self.find = True # only for doctet: return (mat.value)
def solver(item: OptimizationDataIn):
    df_solve = pd.read_json(item.df_solve)
    above_goal_flag = 0

    df_solve = df_solve[df_solve[item.sel_metric] <= item.dtss_goal]
    if item.minimum_cost_or_pvp:
        df_solve = df_solve[df_solve[item.goal_type] >= item.minimum_cost_or_pvp]

    unique_parts = df_solve['Part_Ref'].unique()
    descriptions = [x for x in df_solve['Part_Desc']]
    df_solve = df_solve[df_solve['Part_Ref'].isin(unique_parts)]

    n_size = df_solve['Part_Ref'].nunique()  # Number of different parts
    if not n_size:
        return None

    values = np.array(df_solve[item.goal_type].values.tolist())  # Costs/Sale prices for each reference, info#1
    other_values = df_solve[item.non_goal_type].values.tolist()
    dtss = np.array(df_solve[item.sel_metric].values.tolist())  # Days to Sell of each reference, info#2

    selection = cp.Variable(n_size, integer=True)

    dtss_constraint = cp.multiply(selection.T, dtss)
    total_value = selection @ values  # Changed in CVXPY 1.1

    if item.max_part_number:
        problem_testing_2 = cp.Problem(cp.Maximize(total_value), [
            dtss_constraint <= item.dtss_goal,
            selection >= 0,
            selection <= 100,
            cp.sum(selection) <= item.max_part_number
        ])
    else:
        problem_testing_2 = cp.Problem(cp.Maximize(total_value), [
            dtss_constraint <= item.dtss_goal,
            selection >= 0,
            selection <= 100
        ])

    result = problem_testing_2.solve(solver=cp.GLPK_MI, verbose=False, parallel=True)

    if selection.value is not None:
        if result >= item.goal_value:
            above_goal_flag = 1

        response = {
            'selection': [qty for qty in selection.value],
            'unique_parts': [part for part in unique_parts],
            'descriptions': [desc for desc in descriptions],
            'values': [value for value in values],
            'other_values': [value for value in other_values],
            'dtss': [dts for dts in dtss],
            'above_goal_flag': above_goal_flag,
            'optimization_total_sum': result
        }

        return response
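# Hypothetical call sketch for solver() above. OptimizationDataIn is not shown
# in this snippet, so a SimpleNamespace stands in for it; every attribute and
# column name below is inferred from the function body, not from the original
# project.
from types import SimpleNamespace
import pandas as pd

parts_df = pd.DataFrame({
    'Part_Ref': ['P1', 'P2', 'P3'],
    'Part_Desc': ['part 1', 'part 2', 'part 3'],
    'Cost': [10.0, 25.0, 5.0],       # assumed goal_type column
    'PVP': [15.0, 40.0, 9.0],        # assumed non_goal_type column
    'DaysToSell': [12.0, 30.0, 60.0] # assumed sel_metric column
})
item = SimpleNamespace(
    df_solve=parts_df.to_json(),
    sel_metric="DaysToSell",
    dtss_goal=45,
    minimum_cost_or_pvp=0,
    goal_type="Cost",
    non_goal_type="PVP",
    max_part_number=100,
    goal_value=10000,
)
print(solver(item))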
mentor_school_compatability = np.array([[3, 9],
                                        [4, 9],
                                        [2, 9]])  # compatability
# time slot A, time slot B, trait 1, trait 2, trait 3, trait 4

assert mentor_school_compatability.shape[0] == num_mentors, \
    "number of mentors in matrix does not match settings"
assert mentor_school_compatability.shape[1] == num_schools, \
    "number of school in matrix does not match settings"

mentor_school_assignments = cvx.Variable((num_mentors, num_schools), boolean=True)

# We want to maximize the dot product of compatability and assignments
objective = cvx.Maximize(
    cvx.sum(cvx.multiply(mentor_school_compatability, mentor_school_assignments)))

constraints = []

# Every mentor has exactly one school
for mentor_index in range(num_mentors):
    constraints.append(sum(mentor_school_assignments[mentor_index]) == 1)

# Every school has at least one mentor
for school_index in range(num_schools):
    num_mentors_for_school = 0
    for mentor_index in range(num_mentors):
        num_mentors_for_school += mentor_school_assignments[mentor_index, school_index]
    constraints.append(num_mentors_for_school >= 1)
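# The snippet above builds the objective and constraints but stops before
# solving; a minimal continuation sketch (a mixed-integer-capable solver must
# be installed for the boolean variable).
prob = cvx.Problem(objective, constraints)
prob.solve()
print("status:", prob.status)
print("assignments:\n", mentor_school_assignments.value)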
def test_gurobi_warm_start(self): """Make sure that warm starting Gurobi behaves as expected Note: This only checks output, not whether or not Gurobi is warm starting internally """ if cvx.GUROBI in cvx.installed_solvers(): import numpy as np A = cvx.Parameter((2, 2)) b = cvx.Parameter(2) h = cvx.Parameter(2) c = cvx.Parameter(2) A.value = np.array([[1, 0], [0, 0]]) b.value = np.array([1, 0]) h.value = np.array([2, 2]) c.value = np.array([1, 1]) objective = cvx.Maximize(c[0] * self.x[0] + c[1] * self.x[1]) constraints = [ self.x[0] <= h[0], self.x[1] <= h[1], A * self.x == b ] prob = cvx.Problem(objective, constraints) result = prob.solve(solver=cvx.GUROBI, warm_start=True) self.assertEqual(result, 3) self.assertItemsAlmostEqual(self.x.value, [1, 2]) orig_objective = result orig_x = self.x.value # Change A and b from the original values A.value = np.array([[0, 0], [0, 1]]) # <----- Changed b.value = np.array([0, 1]) # <----- Changed h.value = np.array([2, 2]) c.value = np.array([1, 1]) # Without setting update_eq_constrs = False, the results should change to the correct answer result = prob.solve(solver=cvx.GUROBI, warm_start=True) self.assertEqual(result, 3) self.assertItemsAlmostEqual(self.x.value, [2, 1]) # Change h from the original values A.value = np.array([[1, 0], [0, 0]]) b.value = np.array([1, 0]) h.value = np.array([1, 1]) # <----- Changed c.value = np.array([1, 1]) # Without setting update_ineq_constrs = False, the results should change to the correct answer result = prob.solve(solver=cvx.GUROBI, warm_start=True) self.assertEqual(result, 2) self.assertItemsAlmostEqual(self.x.value, [1, 1]) # Change c from the original values A.value = np.array([[1, 0], [0, 0]]) b.value = np.array([1, 0]) h.value = np.array([2, 2]) c.value = np.array([2, 1]) # <----- Changed # Without setting update_objective = False, the results should change to the correct answer result = prob.solve(solver=cvx.GUROBI, warm_start=True) self.assertEqual(result, 4) self.assertItemsAlmostEqual(self.x.value, [1, 2]) else: with self.assertRaises(Exception) as cm: prob = cvx.Problem(cvx.Minimize(cvx.norm(self.x, 1)), [self.x == 0]) prob.solve(solver=cvx.GUROBI, warm_start=True) self.assertEqual(str(cm.exception), "The solver %s is not installed." % cvx.GUROBI)
constraints.append(c9 + p9 + g9 == 1)
constraints.append(c10 + p10 + g10 == 1)

# start adding stuff to objective function to max
scores = []

# setting up bucket variables
# gender/race
constraints.append(2 * mb1 <= m1 + b1)
constraints.append(2 * mw1 <= m1 + w1)
constraints.append(2 * fb1 <= f1 + b1)
constraints.append(2 * fw1 <= f1 + w1)

# gender/generation
constraints.append(2 * mh1 <= m1 + hh1_1)

# race/generation
# household/race
# household/gender
# household/generation

scores.append(mb1)

objective = cp.Maximize(sum(scores))
prob = cp.Problem(objective, constraints)
prob.solve()

print("status:", prob.status)
print("optimal value", prob.value)
def maximum_entropy(self, assumptions, **kwargs):
    prob = cvxpy.Problem(
        cvxpy.Maximize(cvxpy.sum_entries(cvxpy.entr(self._cvxpy_var))),
        assumptions + [cvxpy.sum_entries(self._cvxpy_var) == 1] +
        [self._cvxpy_var >= 0])
    prob.solve(**kwargs)
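# Standalone sketch of the same idea with the modern cvxpy API (sum_entries in
# the method above is the pre-1.0 name for cvxpy.sum); the 4-outcome
# distribution here is only an illustrative assumption.
import cvxpy

p = cvxpy.Variable(4)
prob = cvxpy.Problem(cvxpy.Maximize(cvxpy.sum(cvxpy.entr(p))),
                     [cvxpy.sum(p) == 1, p >= 0])
prob.solve()
print(p.value)  # entropy is maximized by the uniform distribution, ~[0.25]*4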
def np_integerizer_cvx(incidence, resid_weights, log_resid_weights, control_importance_weights, total_hh_control_index, lp_right_hand_side, relax_ge_upper_bound, hh_constraint_ge_bound): """ Parameters ---------- incidence : numpy.ndarray(control_count, sample_count) float resid_weights : numpy.ndarray(sample_count,) float log_resid_weights : numpy.ndarray(sample_count,) float control_importance_weights : numpy.ndarray(control_count,) float total_hh_control_index : int lp_right_hand_side : numpy.ndarray(control_count,) float relax_ge_upper_bound : numpy.ndarray(control_count,) float hh_constraint_ge_bound : numpy.ndarray(control_count,) float Returns ------- resid_weights_out : numpy.ndarray(sample_count,) status_text : str """ import cvxpy as cvx STATUS_TEXT = { cvx.OPTIMAL: STATUS_OPTIMAL, cvx.INFEASIBLE: 'INFEASIBLE', cvx.UNBOUNDED: 'UNBOUNDED', cvx.OPTIMAL_INACCURATE: STATUS_FEASIBLE, cvx.INFEASIBLE_INACCURATE: 'INFEASIBLE_INACCURATE', cvx.UNBOUNDED_INACCURATE: 'UNBOUNDED_INACCURATE', None: 'FAILED' } CVX_MAX_ITERS = 300 incidence = incidence.T sample_count, control_count = incidence.shape # - Decision variables for optimization x = cvx.Variable(1, sample_count) # - Create positive continuous constraint relaxation variables relax_le = cvx.Variable(control_count) relax_ge = cvx.Variable(control_count) # FIXME - could ignore as handled by constraint? control_importance_weights[total_hh_control_index] = 0 # - Set objective objective = cvx.Maximize( cvx.sum_entries(cvx.mul_elemwise(log_resid_weights, cvx.vec(x))) - cvx.sum_entries(cvx.mul_elemwise(control_importance_weights, relax_le)) - cvx.sum_entries(cvx.mul_elemwise(control_importance_weights, relax_ge)) ) total_hh_constraint = lp_right_hand_side[total_hh_control_index] # 1.0 unless resid_weights is zero max_x = (~(resid_weights == 0.0)).astype(float).reshape((1, -1)) constraints = [ # - inequality constraints cvx.vec(x * incidence) - relax_le >= 0, cvx.vec(x * incidence) - relax_le <= lp_right_hand_side, cvx.vec(x * incidence) + relax_ge >= lp_right_hand_side, cvx.vec(x * incidence) + relax_ge <= hh_constraint_ge_bound, x >= 0.0, x <= max_x, relax_le >= 0.0, relax_le <= lp_right_hand_side, relax_ge >= 0.0, relax_ge <= relax_ge_upper_bound, # - equality constraint for the total households control cvx.sum_entries(x) == total_hh_constraint, ] prob = cvx.Problem(objective, constraints) assert CVX_SOLVER in cvx.installed_solvers(), \ "CVX Solver '%s' not in installed solvers %s." % (CVX_SOLVER, cvx.installed_solvers()) logger.info("integerizing with '%s' solver." % CVX_SOLVER) try: prob.solve(solver=CVX_SOLVER, verbose=True, max_iters=CVX_MAX_ITERS) except cvx.SolverError: logging.exception( 'Solver error encountered in weight discretization. Weights will be rounded.' ) status_text = STATUS_TEXT[prob.status] if status_text in STATUS_SUCCESS: assert x.value is not None resid_weights_out = np.asarray(x.value)[0] else: assert x.value is None resid_weights_out = resid_weights return resid_weights_out, status_text
constraints = []
for i in range(0, 2):
    sum = 0
    for j in range(0, 3):
        # Psi <= Bsi: probability can be maximum 1 and minimum 0.
        # When bsi = 0, then psi = 0; when bsi = 1, then 0 < psi < 1
        constraints += [probi[i][j] <= Bs[i][j]]
        sum += probi[i][j]
    # sum of probabilities should be 1
    constraints += [sum == 1]

for i in range(0, 3):
    sum1 = 0
    sum2 = 0
    for j in range(0, 3):
        sum1 += probi[1][j] * player1[i][j]
        sum2 += probi[0][j] * player2[j][i]
    # sum of probabilities * player's utility should be less than or equal to nash equilibrium utility
    constraints += [sum1 <= u[0][0]]
    constraints += [sum2 <= u[1][0]]
    # Ui - Usi <= BIG * (1 - Bsi)
    constraints += [(u[0][0] - sum1) <= big * (1 - Bs[0][i])]
    constraints += [(u[1][0] - sum2) <= big * (1 - Bs[1][i])]

prob = cp.Problem(cp.Maximize(cp.sum(Bs)), constraints)
prob.solve()
print("Probability Distribution is", probi.value)
print("The binary x is", Bs.value)
print("The nash utilities are", u.value)
def diamond_norm(choi, **kwargs): r"""Return the diamond norm of the input quantum channel object. This function computes the completely-bounded trace-norm (often referred to as the diamond-norm) of the input quantum channel object using the semidefinite-program from reference [1]. Args: choi(Choi or QuantumChannel): a quantum channel object or Choi-matrix array. kwargs: optional arguments to pass to CVXPY solver. Returns: float: The completely-bounded trace norm :math:`\|\mathcal{E}\|_{\diamond}`. Raises: QiskitError: if CVXPY package cannot be found. Additional Information: The input to this function is typically *not* a CPTP quantum channel, but rather the *difference* between two quantum channels :math:`\|\Delta\mathcal{E}\|_\diamond` where :math:`\Delta\mathcal{E} = \mathcal{E}_1 - \mathcal{E}_2`. Reference: J. Watrous. "Simpler semidefinite programs for completely bounded norms", arXiv:1207.5726 [quant-ph] (2012). .. note:: This function requires the optional CVXPY package to be installed. Any additional kwargs will be passed to the ``cvxpy.solve`` function. See the CVXPY documentation for information on available SDP solvers. """ _cvxpy_check('`diamond_norm`') # Check CVXPY is installed choi = Choi(_input_formatter(choi, Choi, 'diamond_norm', 'choi')) def cvx_bmat(mat_r, mat_i): """Block matrix for embedding complex matrix in reals""" return cvxpy.bmat([[mat_r, -mat_i], [mat_i, mat_r]]) # Dimension of input and output spaces dim_in = choi._input_dim dim_out = choi._output_dim size = dim_in * dim_out # SDP Variables to convert to real valued problem r0_r = cvxpy.Variable((dim_in, dim_in)) r0_i = cvxpy.Variable((dim_in, dim_in)) r0 = cvx_bmat(r0_r, r0_i) r1_r = cvxpy.Variable((dim_in, dim_in)) r1_i = cvxpy.Variable((dim_in, dim_in)) r1 = cvx_bmat(r1_r, r1_i) x_r = cvxpy.Variable((size, size)) x_i = cvxpy.Variable((size, size)) iden = sparse.eye(dim_out) # Watrous uses row-vec convention for his Choi matrix while we use # col-vec. It turns out row-vec convention is requried for CVXPY too # since the cvxpy.kron function must have a constant as its first argument. c_r = cvxpy.bmat([[cvxpy.kron(iden, r0_r), x_r], [x_r.T, cvxpy.kron(iden, r1_r)]]) c_i = cvxpy.bmat([[cvxpy.kron(iden, r0_i), x_i], [-x_i.T, cvxpy.kron(iden, r1_i)]]) c = cvx_bmat(c_r, c_i) # Convert col-vec convention Choi-matrix to row-vec convention and # then take Transpose: Choi_C -> Choi_R.T choi_rt = np.transpose( np.reshape(choi.data, (dim_in, dim_out, dim_in, dim_out)), (3, 2, 1, 0)).reshape(choi.data.shape) choi_rt_r = choi_rt.real choi_rt_i = choi_rt.imag # Constraints cons = [ r0 >> 0, r0_r == r0_r.T, r0_i == - r0_i.T, cvxpy.trace(r0_r) == 1, r1 >> 0, r1_r == r1_r.T, r1_i == - r1_i.T, cvxpy.trace(r1_r) == 1, c >> 0 ] # Objective function obj = cvxpy.Maximize(cvxpy.trace(choi_rt_r @ x_r) + cvxpy.trace(choi_rt_i @ x_i)) prob = cvxpy.Problem(obj, cons) sol = prob.solve(**kwargs) return sol
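# Hypothetical usage sketch for diamond_norm() above (assumes qiskit's
# quantum_info module and an installed CVXPY SDP solver). As the docstring
# notes, the typical input is the *difference* of two channels.
import numpy as np
from qiskit.quantum_info import Choi, Operator

# difference between the identity channel and a Z-rotation channel
delta = Choi(Operator(np.eye(2))) - Choi(Operator(np.diag([1.0, -1.0])))
print(diamond_norm(delta))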
@author: yemi
"""
import numpy as np
import cvxpy as cv
import pandas as pd

prev_healthcare = pd.read_csv('raw_tables/preventive_healthcare.csv', sep=';', decimal=',')
prev_healthcare['id'] = prev_healthcare.index

budget = 10e6
expenses = {'Cardio': 1e6, 'Diabete': 1e6, 'Cancer': 1e6, 'Psychiatric': 1e6,
            'Neurology': 1e6, 'DHO': 1e6, 'Orthopedics': 1e6}

N = cv.Variable((1, 5), integer=True)

obj = cv.Maximize(sum(
    sum(expenses[cat] * (1 - prev_healthcare[cat].iloc[k]) ** N[0, k]
        for cat in expenses.keys())
    for k in range(prev_healthcare.shape[0])))

constraint = [sum(N[0, k] * prev_healthcare['Cost'].iloc[k]
                  for k in range(prev_healthcare.shape[0])) <= budget]

problem = cv.Problem(obj, constraint)
result = problem.solve()
print(N.value)