def _gurobi_minimaxav(profile, committeesize, resolute):
    """Gurobi ILP for Minimax Approval Voting (MAV).

    Selects committees minimizing the largest Hamming distance between the
    committee and any voter's approval set.
    """

    def set_opt_model_func(model, in_committee):
        # a Hamming distance can never exceed the total number of candidates
        max_dist = model.addVar(
            lb=0,
            ub=profile.num_cand,
            vtype=gb.GRB.INTEGER,
            name="max_hamming_distance",
        )

        # the committee has exactly `committeesize` members
        model.addConstr(
            gb.quicksum(in_committee[cand] for cand in profile.candidates)
            == committeesize)

        for voter in profile:
            unapproved = [
                cand for cand in profile.candidates
                if cand not in voter.approved
            ]
            # max_dist must be at least this voter's Hamming distance to the
            # committee: missing approved candidates plus included unapproved ones
            voter_distance = (
                gb.quicksum(1 - in_committee[cand] for cand in voter.approved)
                + gb.quicksum(in_committee[cand] for cand in unapproved))
            model.addConstr(max_dist >= voter_distance)

        # maximize the negated distance so that this code mirrors the other
        # (maximizing) rules in this module
        model.setObjective(-max_dist, gb.GRB.MAXIMIZE)

    committees = _optimize_rule_gurobi(
        set_opt_model_func, profile, committeesize, resolute=resolute)
    return sorted_committees(committees)
def _gurobi_monroe(profile, committeesize, resolute):
    """Gurobi ILP for Monroe's rule.

    Assigns voters to committee members (a partition into `committeesize`
    nearly equal groups) and maximizes the number of voters assigned to a
    candidate they approve.
    """

    def set_opt_model_func(model, in_committee):
        num_voters = len(profile)

        # optimization goal: variable "satisfaction"
        satisfaction = model.addVar(ub=num_voters,
                                    vtype=gb.GRB.INTEGER,
                                    name="satisfaction")

        # the committee has the required size
        model.addConstr(
            gb.quicksum(in_committee[cand] for cand in profile.candidates)
            == committeesize)

        # partition[(cand, voter)] == 1 iff `voter` is assigned to `cand`
        partition = model.addVars(profile.num_cand,
                                  num_voters,
                                  vtype=gb.GRB.INTEGER,
                                  lb=0,
                                  name="partition")

        for voter_idx in range(num_voters):
            # every voter belongs to exactly one assignment group
            model.addConstr(
                gb.quicksum(partition[(cand, voter_idx)]
                            for cand in profile.candidates) == 1)

        for cand in profile.candidates:
            group_size = gb.quicksum(partition[(cand, voter_idx)]
                                     for voter_idx in range(num_voters))
            # a committee member's group holds at least
            # floor(num_voters / committeesize) voters
            # (constraint deactivated via big-M if cand is not in the committee)
            model.addConstr(group_size >= (
                num_voters // committeesize
                - num_voters * (1 - in_committee[cand])))
            # ... and at most ceil(num_voters / committeesize) voters
            model.addConstr(group_size <= (
                num_voters // committeesize
                + bool(num_voters % committeesize)
                + num_voters * (1 - in_committee[cand])))
            # candidates outside the committee get no voters assigned
            model.addConstr(group_size <= num_voters * in_committee[cand])

        # satisfaction counts voters assigned to a candidate they approve
        model.addConstr(
            gb.quicksum(partition[(cand, voter_idx)]
                        * (cand in profile[voter_idx].approved)
                        for voter_idx in range(num_voters)
                        for cand in profile.candidates) >= satisfaction)

        # optimization objective
        model.setObjective(satisfaction, gb.GRB.MAXIMIZE)

    committees = _optimize_rule_gurobi(
        set_opt_model_func, profile, committeesize, resolute=resolute)
    return sorted_committees(committees)
def _mip_thiele_methods(profile, committeesize, scorefct, resolute, solver_id):
    """MIP for Thiele methods (PAV, SLAV, geometric, ...), using Python MIP.

    `scorefct(l)` gives the marginal score of the l-th approved committee
    member; it must be monotonically decreasing (once zero, it stays zero).
    """
    # fail fast: validate scorefct before any model is built
    # (also fixed: the error-message string literal was broken in two)
    score_values = [scorefct(l) for l in range(1, committeesize + 1)]
    if not all(first > second or first == second == 0
               for first, second in zip(score_values, score_values[1:])):
        raise ValueError("scorefct must be monotonic decreasing")

    def set_opt_model_func(model, profile, in_committee, committeesize,
                           previously_found_committees, scorefct):
        # utility[(voter, l)] contains (intended binary) variables counting the
        # number of approved candidates in the selected committee by `voter`.
        # Exactly `number of approved committee members` of them are 1 for each
        # voter, for l = 1...committeesize.
        #
        # Because scorefct is monotonically decreasing, the objective pushes the
        # l=1 variables to 1 first, so utility[(voter, l)] ends up indicating
        # whether `voter` approves at least l candidates in the committee
        # (this is the case for scorefct "pav", "slav" or "geom").
        utility = {}
        for i, voter in enumerate(profile):
            for l in range(1, committeesize + 1):
                utility[(voter, l)] = model.add_var(var_type=mip.BINARY,
                                                    name=f"utility-v{i}-l{l}")
                # TODO: could be faster with lb=0.0, ub=1.0, var_type=mip.CONTINUOUS

        # constraint: the committee has the required size
        model += mip.xsum(in_committee) == committeesize

        # constraint: utilities are consistent with actual committee
        for voter in profile:
            model += mip.xsum(
                utility[voter, l]
                for l in range(1, committeesize + 1)) == mip.xsum(
                    in_committee[cand] for cand in voter.approved)

        # find a new committee that has not been found yet by excluding
        # previously found committees
        for committee in previously_found_committees:
            model += mip.xsum(in_committee[cand]
                              for cand in committee) <= committeesize - 1

        # objective: the Thiele score of the committee
        model.objective = mip.maximize(
            mip.xsum(
                float(scorefct(l)) * voter.weight * utility[(voter, l)]
                for voter in profile for l in range(1, committeesize + 1)))

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        scorefct=scorefct,
        resolute=resolute,
        solver_id=solver_id,
    )
    return sorted_committees(committees)
def _gurobi_minimaxphragmen(profile, committeesize, resolute):
    """ILP for Phragmen's minimax rule (minimax-Phragmen), using Gurobi.

    Minimizes the maximum load. Warning: does not include the lexicographic
    optimization as specified in Markus Brill, Rupert Freeman, Svante Janson
    and Martin Lackner. Phragmen's Voting Methods and Justified
    Representation. https://arxiv.org/abs/2102.12305
    Instead: minimizes the maximum load (without consideration of the
    second-, third-, ...-largest load
    """

    def set_opt_model_func(model, in_committee):
        load = {}
        for cand in profile.candidates:
            for i, voter in enumerate(profile):
                load[(voter, cand)] = model.addVar(ub=1.0,
                                                   lb=0.0,
                                                   name=f"load{i}-{cand}")

        # constraint: the committee has the required size
        model.addConstr(
            gb.quicksum(in_committee[cand] for cand in profile.candidates)
            == committeesize)

        for cand in profile.candidates:
            for voter in profile:
                if cand not in voter.approved:
                    # non-approvers carry no load for this candidate
                    load[(voter, cand)] = 0

        # a candidate's load is distributed among his approvers
        # (fixed: the filter was the vacuous `if cand in profile.candidates`;
        # intended is to sum over the approvers — equivalent here only because
        # non-approver loads are pinned to 0 above)
        for cand in profile.candidates:
            model.addConstr(
                gb.quicksum(voter.weight * load[(voter, cand)]
                            for voter in profile
                            if cand in voter.approved) == in_committee[cand])

        loadbound = model.addVar(lb=0, ub=committeesize, name="loadbound")
        for voter in profile:
            # loadbound is an upper bound on every voter's total load
            model.addConstr(
                gb.quicksum(load[(voter, cand)]
                            for cand in voter.approved) <= loadbound)

        # maximizing the negative maximum load makes code more similar to the
        # other (maximizing) methods here
        model.setObjective(-loadbound, gb.GRB.MAXIMIZE)

    committees = _optimize_rule_gurobi(
        set_opt_model_func, profile, committeesize, resolute=resolute)
    return sorted_committees(committees)
def _ortools_cc(profile, committeesize, resolute):
    """OR-Tools CP-SAT model for Chamberlin-Courant (CC).

    Maximizes the number (or total weight) of voters with at least one
    approved candidate in the committee.

    Raises
    ------
    TypeError
        if any voter weight is not an integer (CP-SAT needs integers).
    """

    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
        previously_found_committees,
    ):
        num_voters = len(profile)
        # satisfaction[i] == 1 iff voter i approves a committee member
        satisfaction = [
            model.NewBoolVar(name=f"satisfaction-of-{voter_id}")
            for voter_id in range(num_voters)
        ]

        # the committee has the required size
        model.Add(
            sum(in_committee[cand]
                for cand in profile.candidates) == committeesize)

        for voter_id in range(num_voters):
            # voter can only be satisfied if an approved candidate is included
            model.Add(satisfaction[voter_id] <= sum(
                in_committee[cand] for cand in profile[voter_id].approved))
            # satisfaction is boolean

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model.Add(
                sum(in_committee[cand]
                    for cand in committee) <= committeesize - 1)

        # objective: (weighted) number of satisfied voters
        if profile.has_unit_weights():
            model.Maximize(
                sum(satisfaction[voter_id] for voter_id in range(num_voters)))
        else:
            model.Maximize(
                sum(satisfaction[voter_id] * profile[voter_id].weight
                    for voter_id in range(num_voters)))

    if not all(isinstance(voter.weight, int) for voter in profile):
        # fixed: the message was missing its closing parenthesis
        raise TypeError(
            f"_ortools_cc requires integer weights "
            f"(encountered {[voter.weight for voter in profile]})."
        )

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
    )
    return sorted_committees(committees)
def _gurobi_thiele_methods(profile, committeesize, scorefct, resolute):
    """Gurobi ILP for Thiele methods (PAV, SLAV, geometric, ...).

    `scorefct(l)` is the marginal score of the l-th approved committee member
    and must be monotonically decreasing (once zero, it stays zero).
    """

    def set_opt_model_func(model, in_committee):
        # utility[(voter, l)] holds (intended binary) variables; for each voter
        # exactly `number of approved committee members` of them equal 1, for
        # l = 1...committeesize.
        #
        # Since scorefct is monotonically decreasing, maximizing the objective
        # fills the cheapest slots first, so utility[(voter, l)] indicates
        # whether `voter` approves at least l committee members
        # (this is the case for scorefct "pav", "slav" or "geom").
        utility = {
            (voter, l): model.addVar(ub=1.0)
            for voter in profile
            for l in range(1, committeesize + 1)
        }
        # the variables should be binary; this is guaranteed because the
        # objective is maximal only when all utility values are 0 or 1.
        # Using vtype=gb.GRB.BINARY does not change the result but is slower.

        # constraint: the committee has the required size
        model.addConstr(gb.quicksum(in_committee) == committeesize)

        # constraint: utilities are consistent with the actual committee
        for voter in profile:
            lhs = gb.quicksum(utility[voter, l]
                              for l in range(1, committeesize + 1))
            rhs = gb.quicksum(in_committee[cand] for cand in voter.approved)
            model.addConstr(lhs == rhs)

        # objective: the Thiele score of the committee
        model.setObjective(
            gb.quicksum(
                float(scorefct(l)) * voter.weight * utility[(voter, l)]
                for voter in profile
                for l in range(1, committeesize + 1)),
            gb.GRB.MAXIMIZE,
        )

    # verify that scorefct is monotonically decreasing
    score_values = [scorefct(l) for l in range(1, committeesize + 1)]
    if not all(first > second or first == second == 0
               for first, second in zip(score_values, score_values[1:])):
        raise ValueError("scorefct must be monotonic decreasing")

    committees = _optimize_rule_gurobi(set_opt_model_func, profile,
                                       committeesize, resolute)
    return sorted_committees(committees)
def _ortools_cc(profile, committeesize, resolute, max_num_of_committees):
    """OR-Tools CP-SAT model for Chamberlin-Courant (CC).

    Maximizes the number (or total weight) of voters with at least one
    approved candidate in the committee.

    Raises
    ------
    TypeError
        if any voter weight is not an integer (CP-SAT needs integers).
    """

    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
    ):
        num_voters = len(profile)
        # satisfaction[i] == 1 iff voter i approves a committee member
        satisfaction = [
            model.NewBoolVar(name=f"satisfaction-of-{voter_id}")
            for voter_id in range(num_voters)
        ]

        # the committee has the required size
        model.Add(
            sum(in_committee[cand]
                for cand in profile.candidates) == committeesize)

        for voter_id in range(num_voters):
            # voter can only be satisfied if an approved candidate is included
            model.Add(satisfaction[voter_id] <= sum(
                in_committee[cand] for cand in profile[voter_id].approved))
            # satisfaction is boolean

        # objective: (weighted) number of satisfied voters
        if profile.has_unit_weights():
            model.Maximize(
                sum(satisfaction[voter_id] for voter_id in range(num_voters)))
        else:
            model.Maximize(
                sum(satisfaction[voter_id] * profile[voter_id].weight
                    for voter_id in range(num_voters)))

    if not all(isinstance(voter.weight, int) for voter in profile):
        # fixed: the message was missing its closing parenthesis
        raise TypeError(f"_ortools_cc requires integer weights "
                        f"(encountered {[voter.weight for voter in profile]}).")

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        name="CC",
        committeescorefct=functools.partial(scores.thiele_score, "cc"),
    )
    return sorted_committees(committees)
def _ortools_minimaxav(profile, committeesize, resolute):
    """OR-Tools CP-SAT model for Minimax Approval Voting (MAV).

    Minimizes the largest Hamming distance between the committee and any
    voter's approval set.
    """

    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
        previously_found_committees,
    ):
        # a Hamming distance never exceeds the number of candidates
        max_dist = model.NewIntVar(lb=0,
                                   ub=profile.num_cand,
                                   name="max_hamming_distance")

        # the committee has the required size
        model.Add(
            sum(in_committee[cand]
                for cand in profile.candidates) == committeesize)

        for voter in profile:
            unapproved = [
                cand for cand in profile.candidates
                if cand not in voter.approved
            ]
            # max_dist bounds this voter's Hamming distance to the committee:
            # missing approved candidates plus included unapproved ones
            model.Add(max_dist >= sum(1 - in_committee[cand]
                                      for cand in voter.approved)
                      + sum(in_committee[cand] for cand in unapproved))

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model.Add(
                sum(in_committee[cand]
                    for cand in committee) <= committeesize - 1)

        # maximize the negated distance so this code mirrors the other
        # (maximizing) rules in this module
        model.Maximize(-max_dist)

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
    )
    return sorted_committees(committees)
def _mip_minimaxav(profile, committeesize, resolute, solver_id):
    """MIP for Minimax Approval Voting (MAV), using Python MIP.

    Minimizes the largest Hamming distance between the committee and any
    voter's approval set.
    """

    def set_opt_model_func(model, profile, in_committee, committeesize,
                           previously_found_committees, scorefct):
        # a Hamming distance never exceeds the number of candidates
        max_dist = model.add_var(
            var_type=mip.INTEGER,
            lb=0,
            ub=profile.num_cand,
            name="max_hamming_distance",
        )

        # the committee has the required size
        model += mip.xsum(in_committee[cand]
                          for cand in profile.candidates) == committeesize

        for voter in profile:
            unapproved = [
                cand for cand in profile.candidates
                if cand not in voter.approved
            ]
            # max_dist bounds this voter's Hamming distance to the committee:
            # missing approved candidates plus included unapproved ones
            model += max_dist >= mip.xsum(
                1 - in_committee[cand] for cand in voter.approved) + mip.xsum(
                    in_committee[cand] for cand in unapproved)

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model += mip.xsum(in_committee[cand]
                              for cand in committee) <= committeesize - 1

        # maximize the negated distance so this code mirrors the other
        # (maximizing) rules in this module
        model.objective = mip.maximize(-max_dist)

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        scorefct=None,
        resolute=resolute,
        solver_id=solver_id,
    )
    return sorted_committees(committees)
def _ortools_minimaxav(profile, committeesize, resolute, max_num_of_committees):
    """OR-Tools CP-SAT model for Minimax Approval Voting (MAV).

    Minimizes the largest Hamming distance between the committee and any
    voter's approval set.
    """

    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
    ):
        # a Hamming distance never exceeds the number of candidates
        max_dist = model.NewIntVar(lb=0,
                                   ub=profile.num_cand,
                                   name="max_hamming_distance")

        # the committee has the required size
        model.Add(
            sum(in_committee[cand]
                for cand in profile.candidates) == committeesize)

        for voter in profile:
            unapproved = [
                cand for cand in profile.candidates
                if cand not in voter.approved
            ]
            # max_dist bounds this voter's Hamming distance to the committee:
            # missing approved candidates plus included unapproved ones
            model.Add(max_dist >= sum(1 - in_committee[cand]
                                      for cand in voter.approved)
                      + sum(in_committee[cand] for cand in unapproved))

        # maximize the negated distance so this code mirrors the other
        # (maximizing) rules in this module
        model.Maximize(-max_dist)

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        name="Minimax AV",
        committeescorefct=lambda profile, committee: -scores.minimaxav_score(
            profile, committee),
        # negative because _optimize_rule_mip maximizes while minimaxav minimizes
    )
    return sorted_committees(committees)
def _mip_minimaxav(profile, committeesize, resolute, max_num_of_committees,
                   solver_id):
    """MIP for Minimax Approval Voting (MAV), using Python MIP.

    Minimizes the largest Hamming distance between the committee and any
    voter's approval set.
    """

    def set_opt_model_func(model, profile, in_committee, committeesize):
        # a Hamming distance never exceeds the number of candidates
        max_dist = model.add_var(
            var_type=mip.INTEGER,
            lb=0,
            ub=profile.num_cand,
            name="max_hamming_distance",
        )

        # the committee has the required size
        model += mip.xsum(in_committee[cand]
                          for cand in profile.candidates) == committeesize

        for voter in profile:
            unapproved = [
                cand for cand in profile.candidates
                if cand not in voter.approved
            ]
            # max_dist bounds this voter's Hamming distance to the committee:
            # missing approved candidates plus included unapproved ones
            model += max_dist >= mip.xsum(
                1 - in_committee[cand] for cand in voter.approved) + mip.xsum(
                    in_committee[cand] for cand in unapproved)

        # maximize the negated distance so this code mirrors the other
        # (maximizing) rules in this module
        model.objective = mip.maximize(-max_dist)

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        solver_id=solver_id,
        name="minimaxav",
        committeescorefct=lambda profile, committee: scores.minimaxav_score(
            profile, committee) * -1,
        # negative because _optimize_rule_mip maximizes while minimaxav minimizes
    )
    return sorted_committees(committees)
def _mip_lexcc(profile, committeesize, resolute, max_num_of_committees,
               solver_id):
    """MIP for lexicographic Chamberlin-Courant (lex-CC), using Python MIP.

    Runs `committeesize` optimization rounds: round y maximizes the
    at-least-y Thiele score subject to the optimal values of all previous
    rounds (within ACCURACY).
    """

    def set_opt_model_func(model, profile, in_committee, committeesize):
        # utility[(voter, x)] contains (intended binary) variables; for each
        # voter exactly `number of approved committee members` of them are 1,
        # for x = 1...committeesize.
        utility = {}
        # the closure reads the enclosing `satisfaction_constraints` list to
        # determine which round we are in
        iteration = len(satisfaction_constraints)
        marginal_scorefcts = [
            scores.get_marginal_scorefct(f"atleast{i + 1}")
            for i in range(iteration + 1)
        ]

        max_in_committee = {}
        for voter_idx, voter in enumerate(profile):
            # a voter cannot have more approved committee members than either
            # the committee size or the size of the approval set
            max_in_committee[voter] = min(len(voter.approved), committeesize)
            for x in range(1, max_in_committee[voter] + 1):
                utility[(voter, x)] = model.add_var(
                    var_type=mip.BINARY, name=f"utility({voter_idx},{x})")

        # constraint: the committee has the required size
        model += mip.xsum(in_committee) == committeesize

        # constraint: utilities are consistent with actual committee
        for voter in profile:
            model += mip.xsum(
                utility[voter, x]
                for x in range(1, max_in_committee[voter] + 1)) == mip.xsum(
                    in_committee[cand] for cand in voter.approved)

        # lock in the optimal scores of all previous rounds
        for prev_iteration in range(iteration):
            model += (mip.xsum(
                float(marginal_scorefcts[prev_iteration](x)) * voter.weight *
                utility[(voter, x)] for voter in profile
                for x in range(1, max_in_committee[voter] + 1)) >=
                      satisfaction_constraints[prev_iteration] - ACCURACY)

        # objective: the at-least-y score of the committee in round y
        model.objective = mip.maximize(
            mip.xsum(
                float(marginal_scorefcts[iteration](x)) * voter.weight *
                utility[(voter, x)] for voter in profile
                for x in range(1, max_in_committee[voter] + 1)))

    # proceed in `committeesize` many rounds to achieve lexicographic
    # tie-breaking
    satisfaction_constraints = []
    for iteration in range(1, committeesize):
        # in round x, maximize the number of voters that have at least x
        # approved candidates in the committee
        committees = _optimize_rule_mip(
            set_opt_model_func=set_opt_model_func,
            profile=profile,
            committeesize=committeesize,
            resolute=resolute,
            max_num_of_committees=max_num_of_committees,
            solver_id=solver_id,
            name=f"lexcc-atleast{iteration}",
            committeescorefct=functools.partial(scores.thiele_score,
                                                f"atleast{iteration}"),
            reuse_model=False,  # slower, but apparently necessary
        )
        new_score = scores.thiele_score(f"atleast{iteration}", profile,
                                        committees[0])
        if new_score == 0:
            # nothing left to optimize; all remaining scores are 0
            satisfaction_constraints += [0] * (
                committeesize - 1 - len(satisfaction_constraints))
            break
        satisfaction_constraints.append(new_score)

    iteration = committeesize
    committees = _optimize_rule_mip(
        set_opt_model_func=set_opt_model_func,
        profile=profile,
        committeesize=committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        solver_id=solver_id,
        name="lexcc-final",
        committeescorefct=functools.partial(scores.thiele_score,
                                            f"atleast{committeesize}"),
        reuse_model=False,  # slower, but apparently necessary
    )
    satisfaction_constraints.append(
        scores.thiele_score(f"atleast{iteration}", profile, committees[0]))
    detailed_info = {"opt_score_vector": satisfaction_constraints}
    return sorted_committees(committees), detailed_info
def _ortools_monroe(profile, committeesize, resolute):
    """OR-Tools CP-SAT model for Monroe's rule.

    Assigns voters to committee members (a partition into `committeesize`
    nearly equal groups) and maximizes the number of voters assigned to a
    candidate they approve.
    """

    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
        previously_found_committees,
    ):
        num_voters = len(profile)

        # optimization goal: variable "satisfaction"
        satisfaction = model.NewIntVar(lb=0,
                                       ub=num_voters,
                                       name="satisfaction")

        # the committee has the required size
        model.Add(
            sum(in_committee[cand]
                for cand in profile.candidates) == committeesize)

        # partition[(cand, voter)] == 1 iff `voter` is assigned to `cand`
        partition = {(cand, voter):
                     model.NewBoolVar(name=f"partition{cand}-{voter}")
                     for cand in profile.candidates
                     for voter in range(num_voters)}

        for voter_idx in range(len(profile)):
            # every voter belongs to exactly one assignment group
            model.Add(
                sum(partition[(cand, voter_idx)]
                    for cand in profile.candidates) == 1)

        for cand in profile.candidates:
            group_size = sum(partition[(cand, voter_idx)]
                             for voter_idx in range(len(profile)))
            # a committee member's group holds at least
            # floor(num_voters / committeesize) voters
            # (deactivated via big-M if cand is not in the committee)
            model.Add(group_size >= (
                num_voters // committeesize
                - num_voters * (1 - in_committee[cand])))
            # ... and at most ceil(num_voters / committeesize) voters
            model.Add(group_size <= (
                num_voters // committeesize
                + bool(num_voters % committeesize)
                + num_voters * (1 - in_committee[cand])))
            # candidates outside the committee get no voters assigned
            model.Add(group_size <= num_voters * in_committee[cand])

        # satisfaction counts voters assigned to a candidate they approve
        model.Add(
            sum(partition[(cand, voter_idx)]
                * (cand in profile[voter_idx].approved)
                for voter_idx in range(len(profile))
                for cand in profile.candidates) >= satisfaction)

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model.Add(
                sum(in_committee[cand]
                    for cand in committee) <= committeesize - 1)

        # optimization objective
        model.Maximize(satisfaction)

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
    )
    return sorted_committees(committees)
def cvxpy_thiele_methods(profile, committeesize, scorefct_id, resolute,
                         solver_id):
    """Compute thiele method using CVXPY.

    This is similar to `_gurobi_thiele_methods()`, where `gurobipy` is used as
    interface to Gurobi. This method supports Gurobi too, but also other
    solvers.

    Parameters
    ----------
    profile : abcvoting.preferences.Profile
        approval sets of voters
    committeesize : int
        number of chosen alternatives
    scorefct_id : str
        must be one of: 'pav'
    resolute : bool
        return only one result
    solver_id : str
        must be one of: 'glpk_mi', 'cbc', 'scip', 'cvxpy_gurobi'
        'cvxpy_gurobi' uses Gurobi in the background, similar to
        `abcrules_gurobi._gurobi_thiele_methods()`, but using the CVXPY
        interface instead of gurobipy.

    Returns
    -------
    committees : list of lists
        a list of chosen committees, each of them represented as list with
        candidates named from `0` to `num_cand`, profile.cand_names is ignored
    """
    if solver_id in ["glpk_mi", "cbc", "scip"]:
        solver = getattr(cp, solver_id.upper())
    elif solver_id == "gurobi":
        solver = cp.GUROBI
    else:
        raise ValueError(
            f"Unknown solver_id for usage with CVXPY: {solver_id}")

    committees = []
    maxscore = None

    # TODO should we use functions from abcvoting.scores? Does it make it slower?
    if scorefct_id == "pav":
        scorefct_value = np.tile(1 / np.arange(1, committeesize + 1),
                                 (len(profile), 1))
    elif scorefct_id == "av":
        raise ValueError("scorefct must be monotonic decreasing")
    else:
        raise NotImplementedError(f"invalid scorefct_id: {scorefct_id}")

    # TODO does this make things slower in case of weights == 1 to multiply
    # weights? We could skip it then of course...
    weights1d = np.array([voter.weight for voter in profile])
    # for some reason CVXPY doesn't like the broadcasting, so we need a 2d array
    weights = np.tile(weights1d[np.newaxis].T, (1, committeesize))

    while True:
        in_committee = cp.Variable(profile.num_cand, boolean=True)

        # utility[i, j] indicates whether voter i approves at least j
        # candidates in the committee, i.e. in row i the first l values are
        # true if i approves l candidates in the committee and all other
        # values are false.
        # explicitly setting boolean=True is not necessary, can be skipped and
        # is then implicitly true as done in
        # abcrules_gurobi._gurobi_thiele_methods()
        utility = cp.Variable((len(profile), committeesize), boolean=True)

        # left-hand-side and right-hand-side of the equality constraints:
        lhs = cp.sum(utility, axis=1)
        rhs = cp.hstack([
            cp.sum([in_committee[cand] for cand in voter.approved])
            for voter in profile
        ])

        constraints = [cp.sum(in_committee) == committeesize, lhs == rhs]

        if solver_id == "glpk_mi":
            # weird workaround necessary... :(
            # see https://github.com/cvxgrp/cvxpy/issues/1112#issuecomment-730360543
            constraints = [
                cp.sum(in_committee) <= committeesize,
                cp.sum(in_committee) >= committeesize,
                lhs <= rhs,
                lhs >= rhs,
            ]

        # find a new committee that has not been found yet by excluding
        # previously found committees
        for committee in committees:
            constraints.append(
                cp.sum(in_committee[committee]) <= committeesize - 1)

        # I don't really understand why, but the * does not seem to be
        # supported here...
        score = cp.sum(
            cp.multiply(cp.multiply(scorefct_value, weights), utility))

        objective = cp.Maximize(score)

        problem = cp.Problem(objective, constraints)

        cvxpy_workaround_infeasible = False
        try:
            problem.solve(solver=solver)
        except KeyError:
            # TODO this is a workaround for
            # https://github.com/cvxgrp/cvxpy/issues/1191
            cvxpy_workaround_infeasible = True

        if (problem.status in (cp.INFEASIBLE, cp.UNBOUNDED)
                or cvxpy_workaround_infeasible):
            if len(committees) == 0:
                raise RuntimeError("no solutions found")
            break
        elif problem.status != cp.OPTIMAL:
            raise RuntimeError(
                f"Solver returned status {problem.status}. At the moment "
                "abcvoting can't handle this error.")

        if maxscore is None:
            maxscore = problem.value
        # TODO is there a way to find accuracy for all solvers?
        # 1e-7 is based on CVXOPT's default value:
        # https://www.cvxpy.org/tutorial/advanced/index.html
        # We might miss committees if the value is too high or get wrong
        # solutions if it's too low.
        if maxscore - problem.value > CVXPY_ACCURACY:
            # no longer optimal
            break

        # fixed: np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement for astype()
        committee = np.arange(
            profile.num_cand)[in_committee.value.astype(bool)]
        committees.append(committee.tolist())

        if resolute:
            break

    return sorted_committees(committees)
def _mip_minimaxphragmen(profile, committeesize, resolute, solver_id):
    """ILP for Phragmen's minimax rule (minimax-Phragmen), using Python MIP.

    Minimizes the maximum load. Warning: does not include the lexicographic
    optimization as specified in Markus Brill, Rupert Freeman, Svante Janson
    and Martin Lackner. Phragmen's Voting Methods and Justified
    Representation. https://arxiv.org/abs/2102.12305
    Instead: minimizes the maximum load (without consideration of the
    second-, third-, ...-largest load
    """

    def set_opt_model_func(model, profile, in_committee, committeesize,
                           previously_found_committees, scorefct):
        load = {}
        for cand in profile.candidates:
            for voter in profile:
                load[(voter, cand)] = model.add_var(
                    lb=0.0, ub=1.0, var_type=mip.CONTINUOUS)

        # constraint: the committee has the required size
        model += mip.xsum(in_committee[cand]
                          for cand in profile.candidates) == committeesize

        for cand in profile.candidates:
            for voter in profile:
                if cand not in voter.approved:
                    # non-approvers carry no load for this candidate
                    load[(voter, cand)] = 0

        # a candidate's load is distributed among his approvers
        # (fixed: the filter was the vacuous `if cand in profile.candidates`;
        # intended is to sum over the approvers — equivalent here only because
        # non-approver loads are pinned to 0 above)
        for cand in profile.candidates:
            model += (mip.xsum(voter.weight * load[(voter, cand)]
                               for voter in profile
                               if cand in voter.approved) >= in_committee[cand])

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model += mip.xsum(in_committee[cand]
                              for cand in committee) <= committeesize - 1

        # loadbound is an upper bound on every voter's total load
        loadbound = model.add_var(lb=0,
                                  ub=committeesize,
                                  var_type=mip.CONTINUOUS,
                                  name="loadbound")
        for voter in profile:
            model += mip.xsum(load[(voter, cand)]
                              for cand in voter.approved) <= loadbound

        # maximizing the negative maximum load makes code more similar to the
        # other (maximizing) methods here
        model.objective = mip.maximize(-loadbound)

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        scorefct=None,
        resolute=resolute,
        solver_id=solver_id,
    )
    return sorted_committees(committees)
def _mip_monroe(profile, committeesize, resolute, solver_id):
    """MIP for Monroe's rule, using Python MIP.

    Assigns voters to committee members (a partition into `committeesize`
    nearly equal groups) and maximizes the number of voters assigned to a
    candidate they approve.
    """

    def set_opt_model_func(model, profile, in_committee, committeesize,
                           previously_found_committees, scorefct):
        num_voters = len(profile)

        # optimization goal: variable "satisfaction"
        satisfaction = model.add_var(ub=num_voters,
                                     var_type=mip.INTEGER,
                                     name="satisfaction")

        # the committee has the required size
        model += mip.xsum(in_committee[cand]
                          for cand in profile.candidates) == committeesize

        # partition[(cand, voter)] == 1 iff `voter` is assigned to `cand`
        partition = {}
        for cand in profile.candidates:
            for voter_idx in range(len(profile)):
                partition[(cand, voter_idx)] = model.add_var(
                    var_type=mip.BINARY, name="partition")

        for voter_idx in range(len(profile)):
            # every voter belongs to exactly one assignment group
            model += mip.xsum(partition[(cand, voter_idx)]
                              for cand in profile.candidates) == 1

        for cand in profile.candidates:
            # a committee member's group holds at least
            # floor(num_voters / committeesize) voters
            # (deactivated via big-M if cand is not in the committee)
            model += mip.xsum(
                partition[(cand, voter_idx)]
                for voter_idx in range(len(profile))) >= (
                    num_voters // committeesize
                    - num_voters * (1 - in_committee[cand]))
            # ... and at most ceil(num_voters / committeesize) voters
            model += mip.xsum(
                partition[(cand, voter_idx)]
                for voter_idx in range(len(profile))) <= (
                    num_voters // committeesize
                    + bool(num_voters % committeesize)
                    + num_voters * (1 - in_committee[cand]))
            # candidates outside the committee get no voters assigned
            model += (mip.xsum(partition[(cand, voter_idx)]
                               for voter_idx in range(len(profile)))
                      <= num_voters * in_committee[cand])

        # satisfaction counts voters assigned to a candidate they approve
        model += (mip.xsum(
            partition[(cand, voter_idx)]
            * (cand in profile[voter_idx].approved)
            for voter_idx in range(len(profile))
            for cand in profile.candidates) >= satisfaction)

        # find a new committee that has not been found before
        for committee in previously_found_committees:
            model += mip.xsum(in_committee[cand]
                              for cand in committee) <= committeesize - 1

        # optimization objective
        model.objective = mip.maximize(satisfaction)

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        scorefct=None,
        resolute=resolute,
        solver_id=solver_id,
    )
    return sorted_committees(committees)
def _mip_minimaxphragmen(profile, committeesize, resolute,
                         max_num_of_committees, solver_id):
    """ILP for Phragmen's minimax rule (minimax-Phragmen), using Python MIP.

    Minimizes the maximum load. Warning: does not include the lexicographic
    optimization as specified in Markus Brill, Rupert Freeman, Svante Janson
    and Martin Lackner. Phragmen's Voting Methods and Justified
    Representation. https://arxiv.org/abs/2102.12305
    Instead: minimizes the maximum load (without consideration of the
    second-, third-, ...-largest load
    """

    def set_opt_model_func(model, profile, in_committee, committeesize):
        load = {}
        for cand in profile.candidates:
            for i, voter in enumerate(profile):
                load[(voter, cand)] = model.add_var(lb=0.0,
                                                    ub=1.0,
                                                    var_type=mip.CONTINUOUS,
                                                    name=f"load{i}-{cand}")

        # constraint: the committee has the required size
        model += mip.xsum(in_committee[cand]
                          for cand in profile.candidates) == committeesize

        for cand in profile.candidates:
            for voter in profile:
                if cand not in voter.approved:
                    # non-approvers carry no load for this candidate
                    load[(voter, cand)] = 0

        # a candidate's load is distributed among his approvers
        # (fixed: the filter was the vacuous `if cand in profile.candidates`;
        # intended is to sum over the approvers — equivalent here only because
        # non-approver loads are pinned to 0 above)
        for cand in profile.candidates:
            model += (mip.xsum(voter.weight * load[(voter, cand)]
                               for voter in profile
                               if cand in voter.approved) >= in_committee[cand])

        # loadbound is an upper bound on every voter's total load
        loadbound = model.add_var(lb=0,
                                  ub=committeesize,
                                  var_type=mip.CONTINUOUS,
                                  name="loadbound")
        for voter in profile:
            model += mip.xsum(load[(voter, cand)]
                              for cand in voter.approved) <= loadbound

        # maximizing the negative maximum load makes code more similar to the
        # other (maximizing) methods here
        model.objective = mip.maximize(-loadbound)

    # check if a sufficient number of candidates is approved
    approved_candidates = profile.approved_candidates()
    if len(approved_candidates) < committeesize:
        # An insufficient number of candidates is approved:
        # Committees consist of all approved candidates plus
        # a correct number of unapproved candidates
        remaining_candidates = [
            cand for cand in profile.candidates
            if cand not in approved_candidates
        ]
        num_missing_candidates = committeesize - len(approved_candidates)

        if resolute:
            return [
                approved_candidates
                | set(remaining_candidates[:num_missing_candidates])
            ]

        return [
            approved_candidates | set(extra)
            for extra in itertools.combinations(remaining_candidates,
                                                num_missing_candidates)
        ]

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        solver_id=solver_id,
        name="minimaxphragmen",
    )
    return sorted_committees(committees)
def _ortools_monroe(profile, committeesize, resolute, max_num_of_committees):
    def set_opt_model_func(
        model,
        profile,
        in_committee,
        committeesize,
    ):
        """Build the CP-SAT model for Monroe's rule."""
        num_voters = len(profile)

        # objective variable: total satisfaction of all voters
        satisfaction = model.NewIntVar(lb=0, ub=num_voters, name="satisfaction")

        # the committee must contain exactly `committeesize` candidates
        model.Add(sum(in_committee[cand] for cand in profile.candidates) == committeesize)

        # assignment[(cand, voter_index)] == 1 iff the voter is assigned to cand;
        # this encodes a partition of the voters into committeesize many groups
        assignment = {}
        for cand in profile.candidates:
            for voter_index in range(num_voters):
                assignment[(cand, voter_index)] = model.NewBoolVar(
                    name=f"partition{cand}-{voter_index}"
                )

        # each voter belongs to exactly one group of the partition
        for voter_index in range(num_voters):
            model.Add(
                sum(assignment[(cand, voter_index)] for cand in profile.candidates) == 1
            )

        for cand in profile.candidates:
            # a committee member's group contains at least
            # floor(num_voters / committeesize) voters
            model.Add(
                sum(assignment[(cand, voter_index)] for voter_index in range(num_voters))
                >= (num_voters // committeesize)
            ).OnlyEnforceIf(in_committee[cand])
            # alternatively:
            # model.Add(
            #     sum(assignment[(cand, j)] for j in range(num_voters))
            #     >= (num_voters // committeesize - num_voters * (1 - in_committee[cand]))
            # )

            # a committee member's group contains at most
            # ceil(num_voters / committeesize) voters
            model.Add(
                sum(assignment[(cand, voter_index)] for voter_index in range(num_voters))
                <= (num_voters // committeesize + bool(num_voters % committeesize))
            ).OnlyEnforceIf(in_committee[cand])

            # candidates outside the committee get no voters assigned
            for voter_index in range(num_voters):
                model.Add(assignment[(cand, voter_index)] <= in_committee[cand])
            # # alternatively
            # model.Add(
            #     sum(assignment[(cand, j)] for j in range(num_voters))
            #     <= num_voters * in_committee[cand]
            # )

        # satisfaction is bounded by the number of voters assigned to
        # a candidate that they approve
        model.Add(
            sum(
                assignment[(cand, voter_index)] * (cand in profile[voter_index].approved)
                for voter_index in range(num_voters)
                for cand in profile.candidates
            )
            >= satisfaction
        )

        # optimization objective
        model.Maximize(satisfaction)

    committees = _optimize_rule_ortools(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        name="Monroe",
        committeescorefct=scores.monroescore,
    )
    return sorted_committees(committees)
def _mip_thiele_methods(
    scorefct_id,
    profile,
    committeesize,
    resolute,
    max_num_of_committees,
    solver_id,
):
    def set_opt_model_func(model, profile, in_committee, committeesize):
        # satlevel[(voter, level)] are (intended binary) variables; for each voter,
        # exactly as many of them are set to 1 as the voter has approved candidates
        # in the selected committee (level runs from 1 to committeesize).
        #
        # If marginal_scorefct(level) > 0 for level >= 1, we assume that
        # marginal_scorefct is monotonically decreasing; combined with the objective
        # function this yields a valid interpretation:
        # satlevel[(voter, level)] indicates whether the voter approves at least
        # `level` candidates in the committee (this is the case for the
        # marginal_scorefct "pav", "slav" or "geom").
        satlevel = {}
        max_satisfaction = {}
        for voter_index, voter in enumerate(profile):
            max_satisfaction[voter] = min(len(voter.approved), committeesize)
            for level in range(1, max_satisfaction[voter] + 1):
                satlevel[(voter, level)] = model.add_var(
                    var_type=mip.BINARY, name=f"utility({voter_index},{level})"
                )

        # the committee must have exactly `committeesize` members
        model += mip.xsum(in_committee) == committeesize

        # the number of active satisfaction levels must equal the number of
        # approved candidates in the committee
        for voter in profile:
            model += mip.xsum(
                satlevel[voter, level] for level in range(1, max_satisfaction[voter] + 1)
            ) == mip.xsum(in_committee[cand] for cand in voter.approved)

        # objective: the Thiele score of the committee
        model.objective = mip.maximize(
            mip.xsum(
                float(marginal_scorefct(level)) * voter.weight * satlevel[(voter, level)]
                for voter in profile
                for level in range(1, max_satisfaction[voter] + 1)
            )
        )

    marginal_scorefct = scores.get_marginal_scorefct(scorefct_id, committeesize)

    # the interpretation of the model above requires a monotonically
    # decreasing marginal scoring function
    score_values = [marginal_scorefct(level) for level in range(1, committeesize + 1)]
    for first, second in zip(score_values, score_values[1:]):
        if not (first > second or first == second == 0):
            raise ValueError("The score function must be monotonic decreasing")

    # warn if marginal scores fall below the solver's numeric accuracy
    min_score_value = min(val for val in score_values if val > 0)
    if min_score_value < ACCURACY:
        output.warning(
            f"Thiele scoring function {scorefct_id} can take smaller values "
            f"(min={min_score_value}) than mip accuracy ({ACCURACY})."
        )

    committees = _optimize_rule_mip(
        set_opt_model_func,
        profile,
        committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        solver_id=solver_id,
        name=scorefct_id,
        committeescorefct=functools.partial(scores.thiele_score, scorefct_id),
    )
    return sorted_committees(committees)
def _gurobi_lexminimaxav(profile, committeesize, resolute, max_num_of_committees):
    """Gurobi ILP for lexicographic minimax-AV.

    Starting from the optimal minimaxav distance, iteratively maximizes --
    for decreasing distance bounds `distance` -- the number of voters whose
    Hamming distance to the committee is at most `distance`, carrying the
    achieved counts forward as constraints.
    """

    def set_opt_model_func(model, in_committee):
        # voteratmostdistances[(i, dist)] == 1 iff voter i has Hamming distance
        # at most `dist` to the committee.  Entries whose value is forced by
        # |voter.approved| and committeesize are stored as plain ints; fix:
        # previously a binary variable was created first and then shadowed by
        # the int, leaving orphan variables in the model.
        voteratmostdistances = {}

        for i, voter in enumerate(profile):
            for dist in range(profile.num_cand + 1):
                if dist >= len(voter.approved) + committeesize:
                    # distances are always <= len(voter.approved) + committeesize
                    voteratmostdistances[(i, dist)] = 1
                elif dist < abs(len(voter.approved) - committeesize):
                    # distances are never < abs(len(voter.approved) - committeesize)
                    voteratmostdistances[(i, dist)] = 0
                else:
                    voteratmostdistances[(i, dist)] = model.addVar(
                        vtype=gb.GRB.BINARY, name=f"atmostdistance({i, dist})"
                    )

        # constraint: the committee has the required size
        model.addConstr(gb.quicksum(in_committee) == committeesize)

        # constraint: distances are consistent with actual committee
        for i, voter in enumerate(profile):
            not_approved = [
                cand for cand in profile.candidates if cand not in voter.approved
            ]
            for dist in range(profile.num_cand + 1):
                if isinstance(voteratmostdistances[(i, dist)], int):
                    # trivially satisfied or trivially violated
                    continue
                # indicator constraint: if the at-most flag is set, the actual
                # Hamming distance of voter i must not exceed dist
                model.addConstr(
                    (voteratmostdistances[(i, dist)] == 1)
                    >> (
                        gb.quicksum(1 - in_committee[cand] for cand in voter.approved)
                        + gb.quicksum(in_committee[cand] for cand in not_approved)
                        <= dist
                    )
                )

        # additional constraints from previous iterations
        for dist, num_voters_achieving_distance in hammingdistance_constraints.items():
            model.addConstr(
                gb.quicksum(
                    voteratmostdistances[(i, dist)] for i, _ in enumerate(profile)
                )
                >= num_voters_achieving_distance - ACCURACY
            )

        new_distance = min(hammingdistance_constraints.keys()) - 1

        # objective: maximize number of voters achieving at most distance `new_distance`
        model.setObjective(
            gb.quicksum(
                voteratmostdistances[(i, new_distance)] for i, _ in enumerate(profile)
            ),
            gb.GRB.MAXIMIZE,
        )

    # compute minimaxav as baseline and then improve on it
    # NOTE(review): `_gurobi_minimaxav` is defined in this file with signature
    # (profile, committeesize, resolute); the previous call additionally passed
    # max_num_of_committees=None, which raises a TypeError -- kwarg removed.
    committees = _gurobi_minimaxav(profile, committeesize, resolute=True)
    maxdistance = scores.minimaxav_score(profile, committees[0])
    # all voters have at most this distance
    hammingdistance_constraints = {maxdistance: len(profile)}
    for distance in range(maxdistance - 1, -1, -1):
        # in iteration `distance` we maximize the number of voters that have at
        # most a Hamming distance of `distance` to the committee
        if distance == 0:
            # last iteration
            _resolute = resolute
            _max_num_of_committees = max_num_of_committees
        else:
            _resolute = True
            _max_num_of_committees = None
        committees, _ = _optimize_rule_gurobi(
            set_opt_model_func=set_opt_model_func,
            profile=profile,
            committeesize=committeesize,
            resolute=_resolute,
            max_num_of_committees=_max_num_of_committees,
            name=f"lexminimaxav-atmostdistance{distance}",
            committeescorefct=functools.partial(
                scores.num_voters_with_upper_bounded_hamming_distance, distance
            ),
        )
        num_voters_achieving_distance = scores.num_voters_with_upper_bounded_hamming_distance(
            distance, profile, committees[0]
        )
        hammingdistance_constraints[distance] = num_voters_achieving_distance
    committees = sorted_committees(committees)
    detailed_info = {
        "hammingdistance_constraints": hammingdistance_constraints,
        "opt_distances": [misc.hamming(voter.approved, committees[0]) for voter in profile],
    }
    return committees, detailed_info
def _gurobi_leximaxphragmen(profile, committeesize, resolute, max_num_of_committees):
    """Gurobi ILP for leximax-Phragmen.

    Iteratively minimizes the largest, then second-largest, ... voter load.
    Load bounds fixed in earlier iterations are assigned to voters via a
    permutation matrix (`loadbound_constraint`), so each bound constrains
    exactly one voter and each voter receives exactly one bound.
    """

    def set_opt_model_func(model, in_committee):
        # load[(voter, cand)]: fraction of cand's load carried by voter
        load = {}
        # loadbound_constraint[(i, j)] == 1 iff loadbound i is assigned to voter j
        loadbound_constraint = {}
        for cand in profile.candidates:
            for i, voter in enumerate(profile):
                load[(voter, cand)] = model.addVar(ub=1.0, lb=0.0, name=f"load{i}-{cand}")

        for i, _ in enumerate(profile):
            for j, _ in enumerate(profile):
                loadbound_constraint[(i, j)] = model.addVar(
                    vtype=gb.GRB.BINARY, name=f"loadbound_constraint({i, j})"
                )

        # `loadbound_constraint` must be a permutation matrix:
        # each row and each column sums to 1
        for i, _ in enumerate(profile):
            model.addConstr(
                gb.quicksum(loadbound_constraint[(i, j)] for j, _ in enumerate(profile))
                == 1
            )
            model.addConstr(
                gb.quicksum(loadbound_constraint[(j, i)] for j, _ in enumerate(profile))
                == 1
            )

        # constraint: the committee has the required size
        model.addConstr(
            gb.quicksum(in_committee[cand] for cand in profile.candidates)
            == committeesize
        )

        # voters carry no load for candidates they do not approve
        for cand in profile.candidates:
            for voter in profile:
                if cand not in voter.approved:
                    load[(voter, cand)] = 0

        # a candidate's load is distributed among his approvers
        # (the guard `cand in profile.candidates` is vacuous; non-approver
        # loads are the constant 0, so the sum is over approvers anyway)
        for cand in profile.candidates:
            model.addConstr(
                gb.quicksum(
                    voter.weight * load[(voter, cand)]
                    for voter in profile
                    if cand in profile.candidates
                )
                >= in_committee[cand]
            )

        # loadbounds from previous iterations: the voter to whom bound i is
        # assigned must not exceed it (big-M relaxation otherwise)
        for i, _ in enumerate(loadbounds):
            for j, voter in enumerate(profile):
                model.addConstr(
                    gb.quicksum(load[(voter, cand)] for cand in voter.approved)
                    <= loadbounds[i]
                    + (1 - loadbound_constraint[(i, j)]) * committeesize
                    + ACCURACY
                    # constraint applies only if loadbound_constraint[(i, j)] == 1
                )

        # newloadbound limits the load of every voter that is not yet
        # covered by one of the previous loadbounds
        newloadbound = model.addVar(lb=0, ub=committeesize, name="new loadbound")
        for j, voter in enumerate(profile):
            model.addConstr(
                gb.quicksum(load[(voter, cand)] for cand in voter.approved)
                <= newloadbound
                + gb.quicksum(
                    loadbound_constraint[(i, j)] * committeesize
                    for i in range(len(loadbounds))
                )
            )

        # maximizing the negative load bound makes code more similar to the other methods here
        model.setObjective(-newloadbound, gb.GRB.MAXIMIZE)

    # check if a sufficient number of candidates is approved
    approved_candidates = profile.approved_candidates()
    if len(approved_candidates) < committeesize:
        # An insufficient number of candidates is approved:
        # Committees consist of all approved candidates plus
        # a correct number of unapproved candidates
        remaining_candidates = [
            cand for cand in profile.candidates if cand not in approved_candidates
        ]
        num_missing_candidates = committeesize - len(approved_candidates)

        if resolute:
            return [
                approved_candidates | set(remaining_candidates[:num_missing_candidates])
            ]

        return [
            approved_candidates | set(extra)
            for extra in itertools.combinations(
                remaining_candidates, num_missing_candidates
            )
        ]

    loadbounds = []
    for iteration in range(len(profile) - 1):
        # in each iteration we enforce a new loadbound:
        # first for all voters, then for all except one, then for all except two, etc.
        committees, neg_loadbound = _optimize_rule_gurobi(
            set_opt_model_func=set_opt_model_func,
            profile=profile,
            committeesize=committeesize,
            resolute=True,
            max_num_of_committees=None,
            name=f"leximaxphragmen-iteration{iteration}",
        )
        if math.isclose(neg_loadbound, 0, rel_tol=CMP_ACCURACY, abs_tol=CMP_ACCURACY):
            # all other voters have a load of zero, no further loadbounds constraints required
            break
        loadbounds.append(-neg_loadbound)

    committees, _ = _optimize_rule_gurobi(
        set_opt_model_func=set_opt_model_func,
        profile=profile,
        committeesize=committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        name="leximaxphragmen-final",
    )

    return sorted_committees(committees)
def _gurobi_lexcc(profile, committeesize, resolute, max_num_of_committees):
    """Gurobi ILP for lexicographic Chamberlin-Courant (lex-CC).

    Proceeds in `committeesize` iterations: iteration y maximizes the
    "atleast{y}" Thiele score subject to the optimal scores of all earlier
    iterations (enforced via `satisfaction_constraints`, which is shared
    with `set_opt_model_func` through the enclosing scope).
    """

    def set_opt_model_func(model, in_committee):
        # utility[(voter, x)] contains (intended binary) variables counting the number of approved
        # candidates in the selected committee by `voter`. This utility[(voter, x)] is true for
        # exactly the number of candidates in the committee approved by `voter` for all
        # x = 1...committeesize.
        utility = {}
        # the current iteration is inferred from how many scores are already fixed
        iteration = len(satisfaction_constraints)
        scorefcts = [
            scores.get_marginal_scorefct(f"atleast{i + 1}") for i in range(iteration + 1)
        ]

        max_in_committee = {}
        for i, voter in enumerate(profile):
            # maximum number of approved candidates that this voter can have in a committee
            max_in_committee[voter] = min(len(voter.approved), committeesize)
            for x in range(1, max_in_committee[voter] + 1):
                utility[(voter, x)] = model.addVar(
                    vtype=gb.GRB.BINARY, name=f"utility({i, x})"
                )

        # constraint: the committee has the required size
        model.addConstr(gb.quicksum(in_committee) == committeesize)

        # constraint: utilities are consistent with actual committee
        for voter in profile:
            model.addConstr(
                gb.quicksum(utility[voter, x] for x in range(1, max_in_committee[voter] + 1))
                == gb.quicksum(in_committee[cand] for cand in voter.approved)
            )

        # additional constraints from previous iterations:
        # the "atleast{y}" scores achieved so far may not deteriorate
        for prev_iteration in range(iteration):
            model.addConstr(
                gb.quicksum(
                    float(scorefcts[prev_iteration](x)) * voter.weight * utility[(voter, x)]
                    for voter in profile
                    for x in range(1, max_in_committee[voter] + 1)
                )
                >= satisfaction_constraints[prev_iteration] - ACCURACY
            )

        # objective: the at-least-y score of the committee in iteration y
        model.setObjective(
            gb.quicksum(
                float(scorefcts[iteration](x)) * voter.weight * utility[(voter, x)]
                for voter in profile
                for x in range(1, max_in_committee[voter] + 1)
            ),
            gb.GRB.MAXIMIZE,
        )

    # proceed in `committeesize` many iterations to achieve lexicographic tie-breaking
    satisfaction_constraints = []
    for iteration in range(1, committeesize):
        # in iteration x maximize the number of voters that have at least x approved candidates
        # in the committee
        committees, _ = _optimize_rule_gurobi(
            set_opt_model_func=set_opt_model_func,
            profile=profile,
            committeesize=committeesize,
            resolute=True,
            max_num_of_committees=None,
            name=f"lexcc-atleast{iteration}",
            committeescorefct=functools.partial(scores.thiele_score, f"atleast{iteration}"),
        )
        satisfaction_constraints.append(
            scores.thiele_score(f"atleast{iteration}", profile, committees[0])
        )
    # final iteration (y = committeesize) honors `resolute` and
    # `max_num_of_committees` as requested by the caller
    iteration = committeesize
    committees, _ = _optimize_rule_gurobi(
        set_opt_model_func=set_opt_model_func,
        profile=profile,
        committeesize=committeesize,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
        name="lexcc-final",
        committeescorefct=functools.partial(scores.thiele_score, f"atleast{committeesize}"),
    )
    satisfaction_constraints.append(
        scores.thiele_score(f"atleast{iteration}", profile, committees[0])
    )
    detailed_info = {"opt_score_vector": satisfaction_constraints}
    return sorted_committees(committees), detailed_info