def optimize_picef(cfg, check_edge_success=False):
    """create and solve a picef model, and return the solution"""
    if cfg.m is None:
        create_picef_model(cfg, check_edge_success=check_edge_success)

    optimize(cfg.m)

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob)
    else:
        matching_chains = []

    cycles_used = [c for c, v in zip(cfg.cycles, cfg.cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cfg.cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(
        ip_model=cfg.m,
        cycles=cycles_used,
        cycle_obj=cycle_obj,
        chains=matching_chains,
        digraph=cfg.digraph,
        edge_success_prob=cfg.edge_success_prob,
        cycle_cap=cfg.max_cycle,
        chain_cap=cfg.max_chain,
    )
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)
    return cycle_obj, matching_chains, sol
def max_cycles(cfg):
    """Use PICEF to find the maximum number of cycles in a matching."""
    m, _, cycle_vars, _ = create_picef_model(cfg)

    num_cycles = quicksum(cycle_vars)
    m.setObjective(num_cycles, GRB.MAXIMIZE)

    optimize(m)

    if cfg.verbose:
        print("maximum number of cycles = %d" % m.objVal)
    # round rather than truncate, to guard against floating-point noise
    num_cycles_opt = int(round(m.objVal))
    if abs(m.objVal - num_cycles_opt) > 1e-6:
        raise Warning("number of cycles is not integer")
    return num_cycles_opt
def solve_picef_model(cfg, remove_edges=None):
    """solve a picef model using a config object, and return the solution.

    if remove_edges is provided, disallow these edges from being used.
    """
    if remove_edges is None:
        remove_edges = []

    # disallow the removed edges by fixing their upper bound to zero
    for e in remove_edges:
        e.used_var.setAttr(GRB.Attr.UB, 0.0)

    optimize(cfg.m)

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob)
    else:
        matching_chains = []

    cycles_used = [c for c, v in zip(cfg.cycles, cfg.cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cfg.cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(
        ip_model=cfg.m,
        cycles=cycles_used,
        cycle_obj=cycle_obj,
        chains=matching_chains,
        digraph=cfg.digraph,
        edge_success_prob=cfg.edge_success_prob,
        cycle_cap=cfg.max_cycle,
        chain_cap=cfg.max_chain,
    )
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)

    # allow removed edges to be used again
    for e in remove_edges:
        e.used_var.setAttr(GRB.Attr.UB, 1.0)

    return sol
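# A minimal standalone gurobipy sketch (illustrative only; not used by this
# module) of the edge-removal mechanism in solve_picef_model above: a binary
# variable is forced out of the solution by setting its upper bound to 0, and
# restored afterwards by resetting the bound to 1.
def _demo_remove_edges_via_upper_bound():
    import gurobipy as gp
    from gurobipy import GRB

    demo = gp.Model()
    demo.setParam("OutputFlag", 0)
    x = demo.addVar(vtype=GRB.BINARY, name="x")
    y = demo.addVar(vtype=GRB.BINARY, name="y")
    demo.setObjective(2 * x + y, GRB.MAXIMIZE)

    # "remove" x, exactly as solve_picef_model does for removed edges
    x.setAttr(GRB.Attr.UB, 0.0)
    demo.optimize()
    assert x.x < 0.5 and y.x > 0.5

    # allow x to be used again
    x.setAttr(GRB.Attr.UB, 1.0)
    demo.optimize()
    assert x.x > 0.5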
def optimize_SAA_picef(cfg, num_weight_measurements, gamma, alpha):
    """solve the PICEF model with a sample-average-approximation (SAA)
    mean-CVaR objective over the edge weight measurements (e.weight_list)."""
    m, cycles, cycle_vars, _ = create_picef_model(cfg)

    # add cycle objects
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = failure_aware_cycle_weight(c_obj.vs, cfg.digraph,
                                                  cfg.edge_success_prob)
        c_obj.grb_var = var
        cycle_list.append(c_obj)

    # add variables for each edge weight measurement: weight_vars[i] is the
    # negated matching weight under the i^th measurement
    weight_vars = m.addVars(num_weight_measurements,
                            vtype=GRB.CONTINUOUS,
                            lb=-GRB.INFINITY,
                            ub=GRB.INFINITY)
    for i in range(num_weight_measurements):
        m.addConstr(weight_vars[i] == -(
            quicksum(e.used_var * e.weight_list[i] for e in cfg.digraph.es) +
            quicksum(e.weight_list[i] * e.edge_var
                     for ndd in cfg.ndds for e in ndd.edges)))

    # auxiliary variable
    d_var = m.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, ub=GRB.INFINITY)

    # add pi variables & constraints for SAA
    pi_vars = m.addVars(num_weight_measurements,
                        vtype=GRB.CONTINUOUS,
                        lb=-GRB.INFINITY,
                        ub=GRB.INFINITY)
    for i in range(num_weight_measurements):
        m.addConstr(pi_vars[i] >= weight_vars[i])
        m.addConstr(pi_vars[i] >= (1 + gamma / alpha) * weight_vars[i] -
                    (d_var * gamma) / alpha)

    # objective
    obj = (1.0 / float(num_weight_measurements)) * quicksum(pi_vars) \
        + gamma * d_var
    m.setObjective(obj, sense=GRB.MINIMIZE)

    if not cfg.use_chains:
        raise Exception("not implemented")
    elif cfg.edge_success_prob == 1:
        pass
    else:
        raise Exception("not implemented")

    optimize(m)

    pair_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob)
        ndd_chain_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_chain_edges = []
        matching_chains = []

    matching_edges = pair_edges + ndd_chain_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges)))

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(ip_model=m,
                      cycles=cycles_used,
                      cycle_obj=cycle_obj,
                      chains=matching_chains,
                      digraph=cfg.digraph,
                      edge_success_prob=cfg.edge_success_prob,
                      chain_restriction=cfg.chain_restriction,
                      cycle_restriction=cfg.cycle_restriction,
                      cycle_cap=cfg.max_cycle,
                      chain_cap=cfg.max_chain,
                      cardinality_restriction=cfg.cardinality_restriction)
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)
    return sol, matching_edges
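# A standalone numpy sanity check (illustrative; not used by the solver) of
# the epigraph construction in optimize_SAA_picef above: with
# pi_i = max(w_i, (1 + gamma/alpha) * w_i - gamma * d / alpha), minimizing
# (1/N) * sum(pi) + gamma * d over d gives mean(w) + gamma * CVaR_alpha(w),
# where w_i is the negated matching weight under measurement i.
def _demo_saa_cvar_objective():
    import numpy as np

    rng = np.random.default_rng(0)
    N, alpha, gamma = 1000, 0.1, 0.5
    w = rng.normal(size=N)  # stand-in for the negated matching weights

    def saa_obj(d):
        pi = np.maximum(w, (1 + gamma / alpha) * w - gamma * d / alpha)
        return pi.mean() + gamma * d

    # the minimizing d is attained at a sample point, so a search over the
    # samples suffices
    best = min(saa_obj(d) for d in w)

    # direct computation: CVaR_alpha is the mean of the worst alpha-fraction
    k = int(round(alpha * N))
    cvar = np.sort(w)[-k:].mean()
    assert abs(best - (w.mean() + gamma * cvar)) < 1e-9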
def optimise_robust_picef(cfg):
    m, cycles, cycle_vars, num_edges_var = create_picef_model(cfg)

    # for use later
    floor_gamma = np.floor(cfg.gamma)
    ceil_gamma = np.ceil(cfg.gamma)
    gamma_frac = cfg.gamma - floor_gamma

    # add cycle vars
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = cycle_weight(c_obj.vs, cfg.digraph)
        c_obj.grb_var = var
        cycle_list.append(c_obj)

    m.update()

    # gamma is integer
    if gamma_frac == 0:
        if cfg.use_chains:
            for ndd in cfg.ndds:
                for e in ndd.edges:
                    g_var = m.addVar(vtype=GRB.BINARY)
                    d_var = m.addVar(vtype=GRB.BINARY)
                    e.g_var = g_var
                    e.d_var = d_var
                    m.addGenConstrAnd(e.d_var, [e.g_var, e.edge_var])
            m.update()

        # add g and d variables for pair-pair edges
        for e in cfg.digraph.es:
            g_var = m.addVar(vtype=GRB.BINARY)
            d_var = m.addVar(vtype=GRB.BINARY)
            e.g_var = g_var
            e.d_var = d_var
            m.addGenConstrAnd(e.d_var, [e.g_var, e.used_var])
        m.update()

    # gamma is not integer
    else:
        if cfg.use_chains:
            # use both gf (full discount, if gf=1 and gp=0) and gp (partial
            # discount, if gf=gp=1)
            for ndd in cfg.ndds:
                for e in ndd.edges:
                    gf_var = m.addVar(vtype=GRB.BINARY)
                    df_var = m.addVar(vtype=GRB.BINARY)
                    e.gf_var = gf_var
                    e.df_var = df_var
                    m.addGenConstrAnd(e.df_var, [e.gf_var, e.edge_var])
                    gp_var = m.addVar(vtype=GRB.BINARY)
                    dp_var = m.addVar(vtype=GRB.BINARY)
                    e.gp_var = gp_var
                    e.dp_var = dp_var
                    m.addGenConstrAnd(e.dp_var, [e.gp_var, e.edge_var])
            m.update()

        for e in cfg.digraph.es:
            gf_var = m.addVar(vtype=GRB.BINARY)
            df_var = m.addVar(vtype=GRB.BINARY)
            e.gf_var = gf_var
            e.df_var = df_var
            m.addGenConstrAnd(e.df_var, [e.gf_var, e.used_var])
            gp_var = m.addVar(vtype=GRB.BINARY)
            dp_var = m.addVar(vtype=GRB.BINARY)
            e.gp_var = gp_var
            e.dp_var = dp_var
            m.addGenConstrAnd(e.dp_var, [e.gp_var, e.used_var])
        m.update()

    # discount indicators g follow the same ordering as the edge discount
    # values (sort in increasing order)
    if cfg.use_chains:
        ndd_e = [e for ndd in cfg.ndds for e in ndd.edges]
    else:
        ndd_e = []
    all_edges = cfg.digraph.es + ndd_e
    e_sorted = sorted(all_edges, key=lambda x: x.discount, reverse=False)

    # ordering constraints over g
    # gamma is integer
    if gamma_frac == 0:
        for i in range(len(e_sorted) - 1):
            m.addConstr(e_sorted[i].g_var <= e_sorted[i + 1].g_var)
    # gamma is not integer
    else:
        for i in range(len(e_sorted) - 1):
            m.addConstr(e_sorted[i].gf_var <= e_sorted[i + 1].gf_var)
            m.addConstr(e_sorted[i].gp_var <= e_sorted[i + 1].gp_var)

    # uncertainty budget (number of discounted edges): the smaller of gamma
    # and the number of edges used in the matching
    gamma_var = m.addVar(vtype=GRB.CONTINUOUS)
    m.addGenConstrMin(gamma_var, [num_edges_var, cfg.gamma])

    # add a cardinality restriction if necessary
    if cfg.cardinality_restriction is not None:
        m.addConstr(num_edges_var <= cfg.cardinality_restriction)

    m.update()

    # limit the number of discounted variables
    # gamma is integer
    if gamma_frac == 0:
        m.addConstr(quicksum(e.d_var for e in all_edges) == gamma_var)
    # gamma is not integer
    else:
        h_var = m.addVar(vtype=GRB.BINARY)
        m.addConstr(cfg.gamma - num_edges_var <= W_small * h_var)
        m.addConstr(num_edges_var - cfg.gamma <= W_small * (1 - h_var))
        m.addConstr(
            quicksum(e.dp_var for e in all_edges) ==
            h_var * num_edges_var + (1 - h_var) * ceil_gamma)
        m.addConstr(
            quicksum(e.df_var for e in all_edges) ==
            h_var * num_edges_var + (1 - h_var) * floor_gamma)

    # total discount (by edge)
    # gamma is integer
    if gamma_frac == 0:
        total_discount = quicksum(e.discount * e.d_var for e in all_edges)
    # gamma is not integer
    else:
        total_discount = quicksum((1 - gamma_frac) * e.discount * e.df_var
                                  for e in all_edges) + \
                         quicksum(gamma_frac * e.discount * e.dp_var
                                  for e in all_edges)

    # set a variable for the total (optimistic) matching weight
    total_weight = m.addVar(vtype=GRB.CONTINUOUS)

    m.update()

    if not cfg.use_chains:
        m.addConstr(total_weight == quicksum(
            failure_aware_cycle_weight(c, cfg.digraph, cfg.edge_success_prob)
            * var for c, var in zip(cycles, cycle_vars)))
        obj_expr = total_weight - total_discount
    elif cfg.edge_success_prob == 1:
        m.addConstr(total_weight == (
            quicksum(cycle_weight(c, cfg.digraph) * var
                     for c, var in zip(cycles, cycle_vars)) +
            quicksum(e.weight * e.edge_var
                     for ndd in cfg.ndds for e in ndd.edges) +
            quicksum(e.weight * var
                     for e in cfg.digraph.es for var in e.grb_vars)))
        obj_expr = total_weight - total_discount
    else:
        raise Warning("not implemented")

    m.setObjective(obj_expr, GRB.MAXIMIZE)

    optimize(m)

    if gamma_frac == 0:
        # gamma is integer
        discounted_pair_edges = [e for e in cfg.digraph.es if e.d_var.x > 0]
        for e in discounted_pair_edges:
            e.discount_frac = e.d_var.x

        if cfg.use_chains:
            discounted_ndd_edges = [(i_ndd, e)
                                    for i_ndd, ndd in enumerate(cfg.ndds)
                                    for e in ndd.edges if e.d_var.x > 0.0]
            for _, e in discounted_ndd_edges:
                e.discount_frac = e.d_var.x
    else:
        # gamma is not integer
        discounted_pair_edges = [
            e for e in cfg.digraph.es
            if (e.df_var.x > 0.0) or (e.dp_var.x > 0.0)
        ]
        for e in discounted_pair_edges:
            e.discount_frac = (1 - gamma_frac) * e.df_var.x \
                + gamma_frac * e.dp_var.x

        if cfg.use_chains:
            discounted_ndd_edges = [
                (i_ndd, e) for i_ndd, ndd in enumerate(cfg.ndds)
                for e in ndd.edges
                if (e.df_var.x > 0.0) or (e.dp_var.x > 0.0)
            ]
            for _, e in discounted_ndd_edges:
                e.discount_frac = (1 - gamma_frac) * e.df_var.x \
                    + gamma_frac * e.dp_var.x

    if cfg.use_chains:
        ndd_matching_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_matching_edges = []

    used_matching_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]
    matching_edges = ndd_matching_edges + used_matching_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges)))

    chains_used = kidney_utils.get_optimal_chains(
        cfg.digraph, cfg.ndds, cfg.edge_success_prob) \
        if cfg.use_chains else []

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(ip_model=m,
                      cycles=cycles_used,
                      cycle_obj=cycle_obj,
                      chains=chains_used,
                      digraph=cfg.digraph,
                      edge_success_prob=cfg.edge_success_prob,
                      gamma=cfg.gamma,
                      robust_weight=m.objVal,
                      optimistic_weight=total_weight.x,
                      chain_restriction=cfg.chain_restriction,
                      cycle_restriction=cfg.cycle_restriction,
                      cycle_cap=cfg.max_cycle,
                      chain_cap=cfg.max_chain,
                      cardinality_restriction=cfg.cardinality_restriction)
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)
    return sol, matching_edges
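# A standalone numpy illustration (the numbers here are illustrative) of the
# fractional uncertainty budget in optimise_robust_picef above: with
# gamma = 2.5, the adversary fully discounts the floor(gamma) = 2 edges with
# the largest discounts and applies a partial discount of
# gamma - floor(gamma) = 0.5 to the next one, matching
# discount_frac = (1 - gamma_frac) * df + gamma_frac * dp.
def _demo_fractional_gamma_discount():
    import numpy as np

    gamma = 2.5
    discounts = np.array([4.0, 1.0, 3.0, 2.0])  # per-edge discount values
    order = np.argsort(discounts)[::-1]         # largest discount first

    floor_g = int(np.floor(gamma))
    gamma_frac = gamma - floor_g

    discount_frac = np.zeros_like(discounts)
    discount_frac[order[:floor_g]] = 1.0        # df = dp = 1: full discount
    discount_frac[order[floor_g]] = gamma_frac  # dp = 1, df = 0: partial

    total_discount = (discounts * discount_frac).sum()
    assert total_discount == 4.0 + 3.0 + 0.5 * 2.0  # == 8.0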
def optimize_picef(cfg):
    m, cycles, cycle_vars, _ = create_picef_model(cfg)

    # add cycle objects
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = failure_aware_cycle_weight(c_obj.vs, cfg.digraph,
                                                  cfg.edge_success_prob)
        c_obj.grb_var = var
        cycle_list.append(c_obj)

    if not cfg.use_chains:
        obj_expr = quicksum(
            failure_aware_cycle_weight(c, cfg.digraph, cfg.edge_success_prob)
            * var for c, var in zip(cycles, cycle_vars))
    elif cfg.edge_success_prob == 1:
        obj_expr = (quicksum(cycle_weight(c, cfg.digraph) * var
                             for c, var in zip(cycles, cycle_vars)) +
                    quicksum(e.weight * e.edge_var
                             for ndd in cfg.ndds for e in ndd.edges) +
                    quicksum(e.weight * var
                             for e in cfg.digraph.es for var in e.grb_vars))
    else:
        obj_expr = (quicksum(
            failure_aware_cycle_weight(c, cfg.digraph, cfg.edge_success_prob)
            * var for c, var in zip(cycles, cycle_vars)) +
                    quicksum(e.weight * cfg.edge_success_prob * e.edge_var
                             for ndd in cfg.ndds for e in ndd.edges) +
                    quicksum(e.weight * cfg.edge_success_prob**(pos + 1) * var
                             for e in cfg.digraph.es
                             for var, pos in zip(e.grb_vars,
                                                 e.grb_var_positions)))

    m.setObjective(obj_expr, GRB.MAXIMIZE)

    optimize(m)

    pair_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob)
        ndd_chain_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_chain_edges = []
        matching_chains = []

    matching_edges = pair_edges + ndd_chain_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges)))

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(ip_model=m,
                      cycles=cycles_used,
                      cycle_obj=cycle_obj,
                      chains=matching_chains,
                      digraph=cfg.digraph,
                      edge_success_prob=cfg.edge_success_prob,
                      chain_restriction=cfg.chain_restriction,
                      cycle_restriction=cfg.cycle_restriction,
                      cycle_cap=cfg.max_cycle,
                      chain_cap=cfg.max_chain,
                      cardinality_restriction=cfg.cardinality_restriction)
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)
    return sol, matching_edges
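# A standalone illustration of the failure-aware chain weighting in the
# objective above: the k-th edge of a chain (k = 1 for the NDD edge) only
# yields its weight if all k edges up to and including it succeed, so it is
# discounted by edge_success_prob ** k; this is what the exponent pos + 1
# implements for the position-indexed edge variables.
def _demo_failure_aware_chain_weight():
    p = 0.7
    chain_edge_weights = [10.0, 20.0, 30.0]  # NDD edge first
    expected = sum(w * p ** k
                   for k, w in enumerate(chain_edge_weights, start=1))
    assert abs(expected - (10 * 0.7 + 20 * 0.49 + 30 * 0.343)) < 1e-12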
def optimize_DRO_SAA_picef(cfg, num_weight_measurements, gamma, alpha, theta,
                           w_min, w_max):
    """Solve the DRO-SAA formulation of (Ren, 2020).

    Arguments:
        cfg: (OptConfig object)
        num_weight_measurements: (int) number of weight measurements
            associated with each edge
        gamma: (float) parameter balancing between a pure CVaR objective
            (gamma -> infinity) and a pure max-expectation objective
            (gamma = 0)
        alpha: (float) CVaR protection level, should be in [0, 1]
        theta: (float) predicted distance between the assumed distribution
            and the true distribution
        w_min: (float) assumed minimum edge weight of the unknown
            distribution
        w_max: (float) assumed maximum edge weight of the unknown
            distribution
    """
    m, cycles, cycle_vars, _ = create_picef_model(cfg)

    # add cycle objects
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = failure_aware_cycle_weight(c_obj.vs, cfg.digraph,
                                                  cfg.edge_success_prob)
        c_obj.grb_var = var
        cycle_list.append(c_obj)

    # auxiliary variables
    d_var = m.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY,
                     ub=GRB.INFINITY, name='d')
    lam_var = m.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY,
                       ub=GRB.INFINITY, name='lambda')
    s_vars = m.addVars(num_weight_measurements, vtype=GRB.CONTINUOUS,
                       lb=-GRB.INFINITY, ub=GRB.INFINITY, name='s')

    # add eta and mu vars for each edge
    for e in cfg.digraph.es:
        e.eta_vars = m.addVars(num_weight_measurements, 2,
                               vtype=GRB.CONTINUOUS, lb=0.0,
                               ub=GRB.INFINITY, name='eta')
        e.mu_vars = m.addVars(num_weight_measurements, 2,
                              vtype=GRB.CONTINUOUS, lb=0.0,
                              ub=GRB.INFINITY, name='mu')
    for n in cfg.ndds:
        for e in n.edges:
            # also add the used_var here, for convenience
            e.used_var = e.edge_var
            e.eta_vars = m.addVars(num_weight_measurements, 2,
                                   vtype=GRB.CONTINUOUS, lb=0.0,
                                   ub=GRB.INFINITY, name='eta')
            e.mu_vars = m.addVars(num_weight_measurements, 2,
                                  vtype=GRB.CONTINUOUS, lb=0.0,
                                  ub=GRB.INFINITY, name='mu')

    # add variables for each edge weight measurement: weight_vars[i] is the
    # negated matching weight under the i^th measurement
    weight_vars = m.addVars(num_weight_measurements, vtype=GRB.CONTINUOUS,
                            lb=-GRB.INFINITY, ub=GRB.INFINITY)
    for i in range(num_weight_measurements):
        m.addConstr(weight_vars[i] == -(
            quicksum(e.used_var * e.weight_list[i] for e in cfg.digraph.es) +
            quicksum(e.weight_list[i] * e.edge_var
                     for ndd in cfg.ndds for e in ndd.edges)))

    b1 = 0
    b2 = -d_var * gamma / alpha

    # construct a list of all edges, for convenience
    e_list = cfg.digraph.es + [e for n in cfg.ndds for e in n.edges]

    # add main constraints
    for i_measurement in range(num_weight_measurements):
        # k = 1
        edge_sum_1a = quicksum([
            e.eta_vars[i_measurement, 0] *
            (w_max - e.weight_list[i_measurement]) for e in e_list
        ])
        edge_sum_1b = quicksum([
            e.mu_vars[i_measurement, 0] *
            (e.weight_list[i_measurement] - w_min) for e in e_list
        ])
        m.addConstr(b1 + weight_vars[i_measurement] + edge_sum_1a +
                    edge_sum_1b <= s_vars[i_measurement],
                    name=("s_constr_k1_i%d" % i_measurement))

        # k = 2
        edge_sum_2a = quicksum([
            e.eta_vars[i_measurement, 1] *
            (w_max - e.weight_list[i_measurement]) for e in e_list
        ])
        edge_sum_2b = quicksum([
            e.mu_vars[i_measurement, 1] *
            (e.weight_list[i_measurement] - w_min) for e in e_list
        ])
        m.addConstr(b2 + (1 + gamma / alpha) * weight_vars[i_measurement] +
                    edge_sum_2a + edge_sum_2b <= s_vars[i_measurement],
                    name=("s_constr_k2_i%d" % i_measurement))

        # now for the norm sum (using the 1-norm): for each edge, get
        # |eta_ik - mu_ik - a_k|, then sum all of these to obtain the 1-norm
        e_norm_plus_vars_k1 = m.addVars(
            len(e_list), vtype=GRB.CONTINUOUS, lb=0.0, ub=GRB.INFINITY,
            name=("e_norm_plus_k1_i%d" % i_measurement))
        e_norm_minus_vars_k1 = m.addVars(
            len(e_list), vtype=GRB.CONTINUOUS, lb=0.0, ub=GRB.INFINITY,
            name=("e_norm_minus_k1_i%d" % i_measurement))
        e_norm_plus_vars_k2 = m.addVars(
            len(e_list), vtype=GRB.CONTINUOUS, lb=0.0, ub=GRB.INFINITY,
            name=("e_norm_plus_k2_i%d" % i_measurement))
        e_norm_minus_vars_k2 = m.addVars(
            len(e_list), vtype=GRB.CONTINUOUS, lb=0.0, ub=GRB.INFINITY,
            name=("e_norm_minus_k2_i%d" % i_measurement))

        for i_e, e in enumerate(e_list):
            # k = 1
            m.addConstr(
                e_norm_plus_vars_k1[i_e] - e_norm_minus_vars_k1[i_e] ==
                e.eta_vars[i_measurement, 0] - e.mu_vars[i_measurement, 0] +
                e.used_var)
            # k = 2
            m.addConstr(
                e_norm_plus_vars_k2[i_e] - e_norm_minus_vars_k2[i_e] ==
                e.eta_vars[i_measurement, 1] - e.mu_vars[i_measurement, 1] +
                (1 + gamma / alpha) * e.used_var)

        # the 1-norm must be bounded by lambda, for each measurement and for
        # each k
        m.addConstr(
            quicksum(e_norm_plus_vars_k2) + quicksum(e_norm_minus_vars_k2)
            <= lam_var)
        m.addConstr(
            quicksum(e_norm_plus_vars_k1) + quicksum(e_norm_minus_vars_k1)
            <= lam_var)

    # objective
    obj = lam_var * theta \
        + (1.0 / float(num_weight_measurements)) * quicksum(s_vars) \
        + gamma * d_var
    m.setObjective(obj, sense=GRB.MINIMIZE)

    if not cfg.use_chains:
        raise Exception("not implemented")
    elif cfg.edge_success_prob == 1:
        pass
    else:
        raise Exception("not implemented")

    optimize(m)

    pair_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob)
        ndd_chain_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_chain_edges = []
        matching_chains = []

    matching_edges = pair_edges + ndd_chain_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges)))

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(ip_model=m,
                      cycles=cycles_used,
                      cycle_obj=cycle_obj,
                      chains=matching_chains,
                      digraph=cfg.digraph,
                      edge_success_prob=cfg.edge_success_prob,
                      chain_restriction=cfg.chain_restriction,
                      cycle_restriction=cfg.cycle_restriction,
                      cycle_cap=cfg.max_cycle,
                      chain_cap=cfg.max_chain,
                      cardinality_restriction=cfg.cardinality_restriction)
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(sol, cfg.digraph, cfg.ndds, cfg.max_cycle,
                                cfg.max_chain)
    return sol, matching_edges
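# A standalone numpy illustration of the 1-norm linearization used in
# optimize_DRO_SAA_picef above: writing a vector x as x_plus - x_minus with
# x_plus, x_minus >= 0, the smallest feasible value of sum(x_plus + x_minus)
# is exactly ||x||_1, so bounding that sum by lam_var enforces
# ||x||_1 <= lam_var.
def _demo_one_norm_linearization():
    import numpy as np

    x = np.array([1.5, -2.0, 0.0, 3.0])
    x_plus, x_minus = np.maximum(x, 0.0), np.maximum(-x, 0.0)
    assert np.allclose(x_plus - x_minus, x)
    assert np.isclose((x_plus + x_minus).sum(), np.abs(x).sum())  # 6.5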
def static_mip_optimal(
    items,
    K,
    valid_responses,
    time_lim=TIME_LIM,
    cut_1=True,
    cut_2=True,
    start_queries=None,
    fixed_queries=None,
    fixed_responses=None,
    start_rec=None,
    subproblem_list=None,
    displayinterval=None,
    gamma_inconsistencies=0.0,
    problem_type="maximin",
    raise_gurobi_time_limit=True,
    log_problem_size=False,
    logger=None,
    u0_type="box",
    artificial_bounds=False,
):
    """
    finds the robust-optimal query set, given a set of items.

    input:
    - items : a list of Item objects
    - K : the number of queries to be selected
    - valid_responses : list of ints, either [1, -1, 0] (indifference) or
        [1, -1] (no indifference)
    - start_queries : list of K queries to use as a warm start; these do not
        need to be sorted
    - fixed_queries : list of queries to fix; the length of this list must
        be <= K. these are fixed as the FIRST queries (order is arbitrary
        anyhow)
    - fixed_responses : list of responses to fix, for the first n <= K
        queries (an alternative to using subproblem_list)
    - cut_1 : (bool) use the cut restricting values of p and q (p < q)
    - cut_2 : (bool) use the cut restricting the order of queries
        (lexicographic order of (p, q) pairs)
    - subproblem_list : subset of response scenarios S, where S[i] is a list
        of ints in {-1, 0, 1} of length K
    - gamma_inconsistencies : (float) assumed upper bound on agent
        inconsistencies. increasing gamma increases the size of the
        uncertainty set
    - problem_type : (str) either 'maximin' or 'mmr'. if maximin, solve the
        maximin robust recommendation problem; if mmr, solve the minimax
        regret problem
    - logger : if specified, write a gurobi logfile at this logger's path

    output:
    - query_list : a list of Query objects
    - objval : the MIP objective value
    - time_limit_reached : (bool) whether the gurobi time limit was reached
    - rec_inds : dict where keys are response scenarios and values are
        indices of the recommended item
    """
    if fixed_queries is None:
        fixed_queries = []

    assert problem_type in ["maximin", "mmr"]

    # indifference responses not supported
    assert set(valid_responses) == {-1, 1}

    # number of features for each item
    num_features = len(items[0].features)

    # polyhedral definition for U^0: B_mat and b_vec
    B_mat, b_vec = get_u0(u0_type, num_features)

    # number of items
    num_items = len(items)

    # lambda variables (dual variables for the initial uncertainty set):
    # lam_vars[r,i] is the i^th dual variable (for i = 1,...,m_const) for
    # the r^th response scenario
    # recall: B_mat (m_const x n), and b_vec (m_const x 1)
    m_const = len(b_vec)
    assert B_mat.shape == (m_const, num_features)

    # get the logfile from the logger, if there is one
    if logger is not None:
        log_file = logger.handlers[0].baseFilename
    else:
        log_file = None

    # define the mip model
    m = create_mip_model(
        time_lim=time_lim, log_file=log_file, displayinterval=displayinterval
    )

    # the objective
    tau = m.addVar(
        vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, ub=GRB.INFINITY, name="tau"
    )

    if problem_type == "maximin":
        m.setObjective(tau, sense=GRB.MAXIMIZE)
        if artificial_bounds:
            # artificial objective bound
            obj_bound = 1000
            m.addConstr(tau <= obj_bound, name="artificial_obj_bound")
    if problem_type == "mmr":
        m.setObjective(tau, sense=GRB.MINIMIZE)
        # artificial objective bound
        obj_bound = -1000
        m.addConstr(tau >= obj_bound, name="artificial_obj_bound")

    # all possible agent response scenarios
    if subproblem_list is None:
        # each subproblem is a single response scenario
        scenario_list = list(itertools.product(valid_responses, repeat=K))
        num_scenarios = int(np.power(len(valid_responses), K))
        assert num_scenarios == len(scenario_list)
    else:
        # each subproblem should be a single response scenario:
        # assert that every response in the subset is a valid response
        for r in subproblem_list:
            assert set(r).difference(set(valid_responses)) == set([])
        scenario_list = subproblem_list

    if fixed_responses is not None:
        # assert subproblem_list is None
        # f = len(fixed_responses)
        # t = tuple(fixed_responses)
        # assert f <= K
        # r_list = list(r for r in itertools.product(valid_responses,
        #               repeat=K) if r[:f] == t)
        raise NotImplementedError("not implemented")

    # define integer variables -- this is the same for both MMR and maximin
    # problem types
    p_vars, q_vars, w_vars = add_integer_variables(
        m,
        num_items,
        K,
        start_queries=start_queries,
        cut_1=cut_1,
        cut_2=cut_2,
        fixed_queries=fixed_queries,
    )

    # now add continuous variables for each response scenario
    if problem_type == "maximin":
        y_vars = {}
        alpha_vars = {}
        beta_vars = {}
        v_bar_vars = {}
        w_bar_vars = {}
        for i, r in enumerate(scenario_list):
            (
                alpha_vars[r],
                beta_vars[r],
                v_bar_vars[r],
                w_bar_vars[r],
            ) = add_r_constraints(
                m,
                tau,
                p_vars,
                q_vars,
                K,
                r,
                i,
                m_const,
                items,
                num_items,
                num_features,
                B_mat,
                b_vec,
                y_vars=y_vars,
                problem_type=problem_type,
                fixed_queries=fixed_queries,
                gamma_inconsistencies=gamma_inconsistencies,
            )
    if problem_type == "mmr":
        # store y_vars for each scenario
        y_vars = {}
        alpha_vars = {}
        beta_vars = {}
        v_bar_vars = {}
        w_bar_vars = {}
        for i, r in enumerate(scenario_list):
            for item in items:
                (
                    alpha_vars[r, item.id],
                    beta_vars[r, item.id],
                    v_bar_vars[r, item.id],
                    w_bar_vars[r, item.id],
                ) = add_r_constraints(
                    m,
                    tau,
                    p_vars,
                    q_vars,
                    K,
                    r,
                    i,
                    m_const,
                    items,
                    num_items,
                    num_features,
                    B_mat,
                    b_vec,
                    y_vars=y_vars,
                    problem_type=problem_type,
                    mmr_item=item,
                    fixed_queries=fixed_queries,
                    gamma_inconsistencies=gamma_inconsistencies,
                )

    m.update()

    if log_problem_size and logger is not None:
        logger.info(f"total variables: {m.numvars}")
        logger.info(f"total constraints: {m.numconstrs}")

    # m.params.DualReductions = 0
    try:
        optimize(m, raise_warnings=False)
    except GurobiTimeLimit:
        if raise_gurobi_time_limit:
            raise

    if m.status == GRB.TIME_LIMIT:
        time_limit_reached = True
    else:
        time_limit_reached = False

    if artificial_bounds and logger is not None:
        if abs(tau.x - obj_bound) <= 1e-3:
            logger.info(
                f"problem is likely unbounded: tau = obj_bound = {obj_bound}"
            )

    try:
        # get the indices of the optimal queries
        p_inds = [-1 for _ in range(K)]
        q_inds = [-1 for _ in range(K)]
        for k in range(K):
            p_list = [np.round(p_vars[i, k].x) for i in range(num_items)]
            p_inds[k] = int(np.argwhere(p_list))
            q_list = [np.round(q_vars[i, k].x) for i in range(num_items)]
            q_inds[k] = int(np.argwhere(q_list))
    except Exception:
        # if the MIP failed for some reason, write an LP file for debugging
        lp_file = generate_filepath(os.getenv("HOME"),
                                    "static_milp_problem", "lp")
        m.write(lp_file)
        if logger is not None:
            logger.info(
                f"static MIP failed, model status = {m.status}, writing LP file to {lp_file}"
            )
        raise StaticMIPFailed

    # get indices of recommended items
    rec_inds = {}
    # for i_r, r in enumerate(r_list):
    #     y_list = [np.round(y_vars[i_r][i].x) for i in range(num_items)]
    #     rec_inds[r] = int(np.argwhere(y_list))

    return (
        [Query(items[p_inds[k]], items[q_inds[k]]) for k in range(K)],
        m.objVal,
        time_limit_reached,
        rec_inds,
    )
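# A standalone illustration of the response-scenario enumeration in
# static_mip_optimal above: with valid_responses = [-1, 1] and K queries
# there are 2**K scenarios, one per element of the K-fold product.
def _demo_scenario_enumeration():
    import itertools

    valid_responses = [-1, 1]
    K = 3
    scenario_list = list(itertools.product(valid_responses, repeat=K))
    assert len(scenario_list) == len(valid_responses) ** K  # 8 scenarios
    assert scenario_list[0] == (-1, -1, -1)
    assert scenario_list[-1] == (1, 1, 1)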
def feasibility_subproblem(
    z_vec_list,
    valid_responses,
    K,
    items,
    B_mat,
    b_vec,
    time_lim=TIME_LIM,
    problem_type="maximin",
    gamma_inconsistencies=0.0,
):
    """solve the scenario decomposition subproblem."""

    # indifference response is not supported
    assert set(valid_responses) == set([-1, 1])
    assert problem_type in ["maximin", "mmr"]

    num_items = len(items)
    num_features = len(items[0].features)

    # recall: B_mat (m_const x n), and b_vec (m_const x 1)
    m_const = len(b_vec)
    assert B_mat.shape == (m_const, num_features)

    m = create_mip_model(time_lim=time_lim)
    m.params.OptimalityTol = 1e-8

    if gamma_inconsistencies > 0:
        xi_vars = m.addVars(K, lb=0.0, ub=GRB.INFINITY)
        m.addConstr(quicksum(xi_vars) <= gamma_inconsistencies)
    else:
        xi_vars = np.zeros(K)

    # objective value
    theta_var = m.addVar(
        vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY, ub=GRB.INFINITY, name="theta"
    )

    # decision variables for the response scenario:
    # s_k = s_plus - s_minus, and either s_plus or s_minus == 1
    s_plus_vars = m.addVars(K, vtype=GRB.BINARY, name="s_plus")
    s_minus_vars = m.addVars(K, vtype=GRB.BINARY, name="s_minus")

    # only one response is possible
    for k in range(K):
        m.addConstr(s_plus_vars[k] + s_minus_vars[k] == 1, name="s_const")
        m.addSOS(GRB.SOS_TYPE1, [s_plus_vars[k], s_minus_vars[k]])

    # add constraints for the utility of each item x:
    # u_vars for each item
    u_vars = m.addVars(
        num_items,
        num_features,
        vtype=GRB.CONTINUOUS,
        lb=-GRB.INFINITY,
        ub=GRB.INFINITY,
        name="u",
    )

    # v_var_list[i] holds the variables used to select the item that
    # maximizes regret for item i (mmr only)
    v_var_list = [None for _ in range(num_items)]
    nu_vars_list = [None for _ in range(num_items)]

    for i_item, item in enumerate(items):
        if problem_type == "mmr":
            # for mmr only: use binary variables to select the item that
            # maximizes regret. v_vars[i] = 1 if item i is selected to
            # maximize regret for item i_item; only one v_vars[i] can be
            # nonzero (sos1)
            v_vars = m.addVars(num_items, vtype=GRB.BINARY)
            m.addConstr(quicksum(v_vars) == 1.0)
            m.addSOS(GRB.SOS_TYPE1, [v_vars[i] for i in range(num_items)])
            v_var_list[i_item] = v_vars

            nu_vars = m.addVars(
                num_items,
                num_features,
                vtype=GRB.CONTINUOUS,
                lb=-GRB.INFINITY,
                ub=GRB.INFINITY,
            )
            nu_vars_list[i_item] = nu_vars

            # linearize the bilinear term nu_ij = v_i * u_j
            for i in range(num_items):
                for j in range(num_features):
                    m.addConstr(nu_vars[i, j] <= M * v_vars[i])
                    m.addConstr(nu_vars[i, j] >= -M * v_vars[i])
                    m.addConstr(
                        nu_vars[i, j] <= u_vars[i_item, j] + M * (1.0 - v_vars[i])
                    )
                    m.addConstr(
                        nu_vars[i, j] >= u_vars[i_item, j] - M * (1.0 - v_vars[i])
                    )

        # U^0 constraints for each u^x
        for i_row in range(m_const):
            m.addConstr(
                quicksum(
                    B_mat[i_row, i_feat] * u_vars[i_item, i_feat]
                    for i_feat in range(num_features)
                )
                >= b_vec[i_row],
                name=("U0_const_row_r%d_i%d" % (i_row, i_item)),
            )

        if problem_type == "maximin":
            m.addConstr(
                theta_var
                >= quicksum(
                    [
                        u_vars[i_item, i_feat] * item.features[i_feat]
                        for i_feat in range(num_features)
                    ]
                ),
                name=("theta_constr_i%d" % i_item),
            )
        if problem_type == "mmr":
            rhs_1 = quicksum(
                [
                    quicksum(
                        [nu_vars[i, j] * items[i].features[j]
                         for i in range(num_items)]
                    )
                    for j in range(num_features)
                ]
            )
            rhs_2 = quicksum(
                [
                    u_vars[i_item, i_feat] * item.features[i_feat]
                    for i_feat in range(num_features)
                ]
            )
            m.addConstr(
                theta_var <= rhs_1 - rhs_2, name=("theta_constr_i%d" % i_item)
            )

        # add constraints on U(z, s)
        for i_k, z_vec in enumerate(z_vec_list):
            m.addConstr(
                quicksum(
                    [
                        u_vars[i_item, i_feat] * z_vec[i_feat]
                        for i_feat in range(num_features)
                    ]
                )
                + xi_vars[i_k]
                >= -M * (1 - s_plus_vars[i_k]),
                name=("U_s_plus_k%d" % i_k),
            )
            m.addConstr(
                quicksum(
                    [
                        u_vars[i_item, i_feat] * z_vec[i_feat]
                        for i_feat in range(num_features)
                    ]
                )
                - xi_vars[i_k]
                <= M * (1 - s_minus_vars[i_k]),
                name=("U_s_minus_k%d" % i_k),
            )

    if problem_type == "maximin":
        m.setObjective(theta_var, sense=GRB.MINIMIZE)
    if problem_type == "mmr":
        m.setObjective(theta_var, sense=GRB.MAXIMIZE)

    m.update()

    # set DualReductions = 0 to distinguish between infeasible/unbounded
    # m.params.DualReductions = 0

    optimize(m)

    try:
        # get the optimal response scenario
        s_opt = [
            int(round(s_plus_vars[i_k].x - s_minus_vars[i_k].x))
            for i_k in range(K)
        ]
        objval = m.objVal
    except Exception as e:
        print(e)
        raise

    return s_opt, objval
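# A standalone check of the big-M linearization used in
# feasibility_subproblem above for the bilinear term nu = v * u, with v
# binary and |u| <= M. The four constraints
#     -M*v <= nu <= M*v   and   u - M*(1 - v) <= nu <= u + M*(1 - v)
# force nu = 0 when v = 0, and nu = u when v = 1.
def _demo_big_m_linearization():
    M = 10.0

    def nu_bounds(v, u):
        lo = max(-M * v, u - M * (1 - v))
        hi = min(M * v, u + M * (1 - v))
        return lo, hi

    assert nu_bounds(0, 3.7) == (0.0, 0.0)  # v = 0 forces nu = 0
    assert nu_bounds(1, 3.7) == (3.7, 3.7)  # v = 1 forces nu = u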
def optimize_dro_saa_edge_existence(cfg, num_measurements, gamma, alpha):
    """
    solve the PICEF model with a DRO-SAA objective for edge existence
    uncertainty.

    this requires that each edge has the property e.realizations: a binary
    vector of length num_measurements
    """
    m, cycles, cycle_vars, _ = create_picef_model(
        cfg, add_o_vars=True, num_o_vars=num_measurements
    )

    # add cycle objects
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = expected_cycle_weight(c_obj)
        c_obj.grb_var = var
        # for DRO-SAA, keep track of cycle realizations: a cycle is realized
        # in measurement n only if all of its edges are
        c_obj.realizations = [
            min(e.realizations[n] for e in c_obj.edges)
            for n in range(num_measurements)
        ]
        cycle_list.append(c_obj)

    # add weight variables for each realization
    w_vars = m.addVars(num_measurements, lb=-GRB.INFINITY, ub=GRB.INFINITY)
    for n in range(num_measurements):
        # (negated) objective for the n^th realization
        m.addConstr(
            w_vars[n]
            == (
                -quicksum(
                    e.weight * big_o_var
                    for i in range(cfg.max_chain - 1)
                    for v in cfg.digraph.vs
                    for e, big_o_var in zip(v.edges_in[i], v.big_o_vars_in[i][n])
                )
                - quicksum(
                    c.weight * c.realizations[n] * c.grb_var for c in cycle_list
                )
            )
        )

    d_var = m.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY)

    # define pi variables
    pi_vars = m.addVars(num_measurements, lb=0, ub=GRB.INFINITY)
    for n in range(num_measurements):
        m.addConstr(pi_vars[n] >= w_vars[n] - d_var)

    # define objective
    obj_expr = (1.0 / float(num_measurements)) * quicksum(w_vars) + gamma * (
        d_var + (1.0 / (alpha * float(num_measurements))) * quicksum(pi_vars)
    )
    m.setObjective(obj_expr, GRB.MINIMIZE)

    optimize(m)

    pair_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob
        )
        ndd_chain_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_chain_edges = []
        matching_chains = []

    matching_edges = pair_edges + ndd_chain_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges))
            )

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(
        ip_model=m,
        cycles=cycles_used,
        cycle_obj=cycle_obj,
        chains=matching_chains,
        digraph=cfg.digraph,
        edge_success_prob=cfg.edge_success_prob,
        chain_restriction=cfg.chain_restriction,
        cycle_restriction=cfg.cycle_restriction,
        cycle_cap=cfg.max_cycle,
        chain_cap=cfg.max_chain,
        cardinality_restriction=cfg.cardinality_restriction,
    )
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(
        sol, cfg.digraph, cfg.ndds, cfg.max_cycle, cfg.max_chain
    )
    return cycle_obj, matching_chains
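# A standalone illustration of the cycle-realization bookkeeping in
# optimize_dro_saa_edge_existence above: under edge-existence uncertainty, a
# cycle is usable in sample n only if every one of its edges exists, so its
# indicator is the minimum (logical AND) of its edge indicators.
def _demo_cycle_realizations():
    edge_realizations = [
        [1, 1, 0, 1],  # edge A over 4 samples
        [1, 0, 1, 1],  # edge B
        [1, 1, 1, 1],  # edge C
    ]
    cycle_realizations = [min(col) for col in zip(*edge_realizations)]
    assert cycle_realizations == [1, 0, 0, 1]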
def optimize_picef_heterogeneous_edge_prob(cfg):
    """
    solve the PICEF model with the heterogeneous edge success probabilities
    of Ren, McElfresh, Bidkhori, Dickerson (2020).

    this requires that each edge has the property edge.success_prob
    """
    m, cycles, cycle_vars, _ = create_picef_model(cfg, add_o_vars=True)

    # add cycle objects
    cycle_list = []
    for c, var in zip(cycles, cycle_vars):
        c_obj = Cycle(c)
        c_obj.add_edges(cfg.digraph.es)
        c_obj.weight = expected_cycle_weight(c_obj)
        c_obj.grb_var = var
        cycle_list.append(c_obj)

    # add objective
    chain_weight = quicksum(
        e.weight * big_o_var
        for i in range(cfg.max_chain - 1)
        for v in cfg.digraph.vs
        for e, big_o_var in zip(v.edges_in[i], v.big_o_vars_in[i])
    )
    obj_expr = chain_weight + quicksum(c.weight * c.grb_var for c in cycle_list)
    m.setObjective(obj_expr, GRB.MAXIMIZE)

    optimize(m)

    pair_edges = [e for e in cfg.digraph.es if e.used_var.x > 0.5]

    if cfg.use_chains:
        matching_chains = kidney_utils.get_optimal_chains(
            cfg.digraph, cfg.ndds, cfg.edge_success_prob
        )
        ndd_chain_edges = [
            e for ndd in cfg.ndds for e in ndd.edges if e.edge_var.x > 0.5
        ]
    else:
        ndd_chain_edges = []
        matching_chains = []

    matching_edges = pair_edges + ndd_chain_edges

    if cfg.cardinality_restriction is not None:
        if len(matching_edges) > cfg.cardinality_restriction:
            raise Warning(
                "cardinality restriction is violated: restriction = %d edges, matching uses %d edges"
                % (cfg.cardinality_restriction, len(matching_edges))
            )

    cycles_used = [c for c, v in zip(cycles, cycle_vars) if v.x > 0.5]
    cycle_obj = [c for c in cycle_list if c.grb_var.x > 0.5]

    sol = OptSolution(
        ip_model=m,
        cycles=cycles_used,
        cycle_obj=cycle_obj,
        chains=matching_chains,
        digraph=cfg.digraph,
        edge_success_prob=cfg.edge_success_prob,
        chain_restriction=cfg.chain_restriction,
        cycle_restriction=cfg.cycle_restriction,
        cycle_cap=cfg.max_cycle,
        chain_cap=cfg.max_chain,
        cardinality_restriction=cfg.cardinality_restriction,
    )
    sol.add_matching_edges(cfg.ndds)
    kidney_utils.check_validity(
        sol, cfg.digraph, cfg.ndds, cfg.max_cycle, cfg.max_chain
    )
    return cycle_obj, matching_chains
def solve_recommendation_problem(
    answered_queries,
    items,
    problem_type,
    gamma=0,
    verbose=False,
    fixed_rec_item=None,
    u0_type="box",
    logger=None,
):
    """solve the robust recommendation problem, and return the worst-case
    objective value and the recommended item"""

    valid_responses = [-1, 1]
    assert set([q.response for q in answered_queries]).issubset(
        set(valid_responses))
    assert problem_type in ["maximin", "mmr"]
    assert gamma >= 0

    # some constants
    K = len(answered_queries)
    num_features = len(items[0].features)

    z_vectors = [q.z for q in answered_queries]
    responses = [q.response for q in answered_queries]

    # polyhedral definition for U^0: b_mat and b_vec
    b_mat, b_vec = get_u0(u0_type, num_features)

    # number of constraints defining U^0 (used to index the dual variables)
    m_const = len(b_vec)

    if logger is not None:
        log_file = logger.handlers[0].baseFilename
        logger.debug("writing gurobi logs for recommendation problem")
    else:
        log_file = None

    # set up the Gurobi model
    m = create_mip_model(verbose=verbose, log_file=log_file)

    # if the recommended item is fixed, don't create y vars
    if fixed_rec_item is not None:
        assert isinstance(fixed_rec_item, Item)
        y_vars = None
    else:
        # y vars : to select x^r, the recommended item in scenario r
        y_vars = m.addVars(len(items), vtype=GRB.BINARY, name="y")
        m.addSOS(GRB.SOS_TYPE1, [y_vars[i] for i in range(len(items))])
        m.addConstr(quicksum(y_vars[i] for i in range(len(items))) == 1,
                    name="y_constr")
        fixed_rec_item = None

    # add dual variables
    if problem_type == "maximin":
        mu_var, alpha_vars, beta_vars = add_rec_dual_variables(
            m,
            K,
            gamma,
            problem_type,
            m_const,
            y_vars,
            num_features,
            items,
            b_mat,
            responses,
            z_vectors,
            fixed_rec_item,
        )

    if problem_type == "mmr":
        theta_var = m.addVar(vtype=GRB.CONTINUOUS, lb=-GRB.INFINITY,
                             ub=GRB.INFINITY, name="theta")
        beta_vars = {}
        alpha_vars = {}
        mu_vars = {}
        for item in items:
            (
                mu_vars[item.id],
                alpha_vars[item.id],
                beta_vars[item.id],
            ) = add_rec_dual_variables(
                m,
                K,
                gamma,
                problem_type,
                m_const,
                y_vars,
                num_features,
                items,
                b_mat,
                responses,
                z_vectors,
                fixed_rec_item,
                mmr_item=item,
            )
            m.addConstr(theta_var >= quicksum(
                [b_vec[j] * beta_vars[item.id][j] for j in range(m_const)])
                + gamma * mu_vars[item.id])

    if problem_type == "maximin":
        obj = (quicksum([b_vec[j] * beta_vars[j] for j in range(m_const)])
               + gamma * mu_var)
        m.setObjective(obj, sense=GRB.MAXIMIZE)
    elif problem_type == "mmr":
        m.setObjective(theta_var, sense=GRB.MINIMIZE)

    m.Params.DualReductions = 0
    optimize(m)

    # --- gather results ---

    # if the model is infeasible or unbounded (e.g. the uncertainty set is
    # empty), write debugging output and raise an exception
    if m.status == GRB.INF_OR_UNBD:
        lp_file = os.path.join(os.getenv("HOME"),
                               "recommendation_problem_infeas_unbd.lp")
        ilp_file = os.path.join(os.getenv("HOME"),
                                "recommendation_problem_infeas_unbd.ilp")
        print(
            f"badly-behaved model. writing lp to: {lp_file}, writing ilp to: {ilp_file}"
        )
        m.computeIIS()
        m.write(lp_file)
        m.write(ilp_file)
        raise Exception("model infeasible or unbounded")

    if m.status == GRB.UNBOUNDED:
        lp_file = os.path.join(os.getenv("HOME"),
                               "recommendation_problem_infeas_unbd.lp")
        print(f"badly-behaved model. writing lp to: {lp_file}")
        m.write(lp_file)
        raise Exception("model is unbounded")

    assert m.status == GRB.OPTIMAL

    if fixed_rec_item is not None:
        return m.objVal, fixed_rec_item
    else:
        # find the recommended item
        y_vals = np.array([var.x for var in y_vars.values()])
        selected_items = np.argwhere(y_vals > 0.5)

        # there can only be one recommended item
        assert len(selected_items) == 1
        recommended_item = items[selected_items[0][0]]

        return m.objVal, recommended_item
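# A standalone illustration of the one-hot recovery at the end of
# solve_recommendation_problem above: the SOS1 and sum-to-one constraints on
# the y variables make exactly one y_i = 1, so the recommended item's index
# is the unique entry with y_i > 0.5.
def _demo_recommended_item_recovery():
    import numpy as np

    y_vals = np.array([0.0, 0.0, 1.0, 0.0])
    selected_items = np.argwhere(y_vals > 0.5)
    assert len(selected_items) == 1
    assert int(selected_items[0][0]) == 2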