def setup(self, solver):
    """Translate this constraint object into a solver constraint.

    Builds the left-hand side from the stored variables (optionally
    weighted by coefficients), resolves a variable right-hand side, and
    registers the constraint on ``solver`` under ``self.name`` when set.

    Raises:
        SolverException: if ``self.sign`` is not a recognized SolverSign.
    """
    rhs = self.rhs

    # Weighted sum when coefficients are given, plain sum otherwise.
    if self.coefficients:
        lhs = xsum([
            var.get_var(solver) * coef
            for var, coef in zip(self.variables, self.coefficients)
        ])
    else:
        lhs = xsum([var.get_var(solver) for var in self.variables])

    # A variable RHS must be resolved to its solver counterpart.
    if isinstance(rhs, MIPVariable):
        rhs = rhs.get_var(solver)

    if self.sign == SolverSign.EQ:
        constraint = lhs == rhs
    elif self.sign == SolverSign.NOT_EQ:
        constraint = lhs != rhs
    elif self.sign == SolverSign.GTE:
        constraint = lhs >= rhs
    elif self.sign == SolverSign.LTE:
        constraint = lhs <= rhs
    else:
        raise SolverException('Incorrect constraint sign')

    # Named constraints are added as (constraint, name) tuples.
    solver += (constraint, self.name) if self.name else constraint
def test_cutting_stock(solver: str): n = 10 # maximum number of bars L = 250 # bar length m = 4 # number of requests w = [187, 119, 74, 90] # size of each item b = [1, 2, 2, 1] # demand for each item # creating the model model = Model(solver_name=solver) x = {(i, j): model.add_var(obj=0, var_type=INTEGER, name="x[%d,%d]" % (i, j)) for i in range(m) for j in range(n)} y = { j: model.add_var(obj=1, var_type=BINARY, name="y[%d]" % j) for j in range(n) } # constraints for i in range(m): model.add_constr(xsum(x[i, j] for j in range(n)) >= b[i]) for j in range(n): model.add_constr(xsum(w[i] * x[i, j] for i in range(m)) <= L * y[j]) # additional constraints to reduce symmetry for j in range(1, n): model.add_constr(y[j - 1] >= y[j]) # optimizing the model model.optimize() # sanity tests assert model.status == OptimizationStatus.OPTIMAL assert abs(model.objective_value - 3) <= 1e-4 assert sum(x.x for x in model.vars) >= 5
def set_linear_coefficients(self, coefficients):
    """Replace the objective's linear coefficients for the given variables.

    Args:
        coefficients: mapping from interface variables to their new
            objective coefficients.

    Raises:
        Exception: if this objective is not attached to a model.
    """
    if self.problem is None:
        raise Exception(
            'Can\'t change coefficients if objective is not associated with a model.'
        )
    self.problem.update()
    model = self.problem.problem
    expr = model.objective.expr
    # Names of the variables whose coefficients are being replaced.
    names = set(var.name for var in coefficients)
    # Rebuild the whole objective: new coefficients for the selected
    # variables plus the untouched terms of the existing expression.
    # Solver variables carry a 'v_' prefix, so var.name[2:] recovers the
    # interface-level name for the membership test — assumes all solver
    # variable names use that prefix; TODO confirm.
    obj = mip.xsum(model.var_by_name('v_' + var.name) * coef
                   for var, coef in coefficients.items()) \
        + mip.xsum(var * coef for var, coef in expr.items()
                   if var.name[2:] not in names) \
        + model.objective.const
    # TODO: why does this not work? It would likely be faster
    # obj_update = mip.xsum(model.var_by_name('v_' + var.name) * coef
    #                       for var, coef in coefficients.items()) \
    #     - mip.xsum(-var * coef for var, coef in expr.items()
    #                if var.name[2:] in names)
    # model.objective.add_expr(obj_update)
    self._changed_expression.update(coefficients)
    model.objective = obj
def basic_cuts_plus_epsilon(self, S, a, k):
    """Check a "basic cut plus epsilon" on job set S, machine a, scenario k.

    Compares the current LP values (index [1] of the variable pairs)
    against the cut's right-hand side; if the violation exceeds the
    tolerance, returns the corresponding constraint built over the
    variable objects (index [0]); otherwise returns 0.

    Returns:
        A constraint expression when a violated cut is found, else 0.
    """
    # right side: weighted completion contribution of S on machine a
    rs = 0
    for j in S:
        rs += self.instance.times[j][a] * self.x[j][a][1]
    # left side
    ls = 0
    soma1 = 0
    # sum of pairwise products of processing times of jobs in S
    for j in range(len(S)):
        for i in range(j + 1, len(S)):
            soma1 += self.instance.times[S[i]][a] * self.instance.times[
                S[j]][a]
    # NOTE(review): the epsilon/release term e[k][a] * p(S, a) is added
    # once after the pair loop — confirm against the cut's derivation.
    soma1 += self.instance.e[k][a] * self.p(S, a)
    ls += soma1
    soma2 = 0
    for j in S:
        # If any y variable is a plain int (fixed/eliminated), the cut
        # cannot be evaluated — bail out.
        if not isinstance(self.y[j][k][a][0], int):
            soma2 += self.y[j][k][a][1] * max(
                self.instance.e[k][a] - self.instance.e[j][a], 0)
        else:
            return 0
    ls -= soma2 * self.p(S, a)
    # violated beyond tolerance?
    if (ls - rs) > 0.00001:  # if rs < ls:
        return xsum(self.instance.times[j][a] * self.x[j][a][0]
                    for j in S) + xsum(self.y[j][k][a][0] * max(
                        self.instance.e[k][a] - self.instance.e[j][a], 0)
                        for j in S) * self.p(S, a) >= soma1
    return 0
def set_opt_model_func(model, profile, in_committee, committeesize):
    """Build the load-balancing (seq-Phragmen-style) committee model.

    Minimizes the maximum voter load by maximizing its negation.
    """
    load = {}
    # one continuous load variable per (voter, candidate) pair
    for cand in profile.candidates:
        for i, voter in enumerate(profile):
            load[(voter, cand)] = model.add_var(lb=0.0,
                                                ub=1.0,
                                                var_type=mip.CONTINUOUS,
                                                name=f"load{i}-{cand}")
    # constraint: the committee has the required size
    model += mip.xsum(in_committee[cand]
                      for cand in profile.candidates) == committeesize
    # non-approvers carry no load for a candidate (variable replaced by 0)
    for cand in profile.candidates:
        for voter in profile:
            if cand not in voter.approved:
                load[(voter, cand)] = 0
    # a candidate's load is distributed among his approvers
    # NOTE(review): the filter `if cand in profile.candidates` is always
    # true here; non-approvers contribute 0 anyway via the zeroing above —
    # presumably this was meant to read `if cand in voter.approved`; the
    # resulting constraint is the same either way.
    for cand in profile.candidates:
        model += (mip.xsum(
            voter.weight * load[(voter, cand)] for voter in profile
            if cand in profile.candidates) >= in_committee[cand])
    loadbound = model.add_var(lb=0,
                              ub=committeesize,
                              var_type=mip.CONTINUOUS,
                              name="loadbound")
    # loadbound is an upper bound on every voter's total load
    for voter in profile:
        model += mip.xsum(load[(voter, cand)]
                          for cand in voter.approved) <= loadbound
    # maximizing the negative distance makes code more similar to the other methods here
    model.objective = mip.maximize(-loadbound)
def solve(active: list, centers: list, sets: list, M: int) -> tuple:
    """Pick M sets (circles) maximizing the number of covered points.

    Args:
        active: the points to cover.
        centers: circle centers, parallel to ``sets``.
        sets: for each candidate circle, the collection of points it covers.
        M: exact number of circles to pick.

    Returns:
        tuple: (circles, covered) — the chosen centers (list) and the set
        of covered points.  (Annotation fixed: the function returns a
        2-tuple, not a bare list.)
    """
    N, K = len(active), len(sets)
    ### model and variables
    m = Model(sense=MAXIMIZE, solver_name=CBC)
    # whether the ith set is picked
    x = [m.add_var(name=f"x{i}", var_type=BINARY) for i in range(K)]
    # whether the ith point is covered
    y = [m.add_var(name=f"y{i}", var_type=BINARY) for i in range(N)]
    ### constraints
    m += xsum(x) == M, "number_circles"
    for i in range(N):
        # if yi is covered, at least one set needs to have it
        included = [x[k] for k in range(K) if active[i] in sets[k]]
        m += xsum(included) >= y[i], f"inclusion{i}"
    ### objective: maximize number of circles covered
    m.objective = xsum(y[i] for i in range(N))
    m.emphasis = 2  # emphasize optimality
    m.verbose = 1
    status = m.optimize()
    circles = [centers[i] for i in range(K) if x[i].x >= 0.99]
    covered = {active[i] for i in range(N) if y[i].x >= 0.99}
    return circles, covered
def set_opt_model_func(model, profile, in_committee, committeesize,
                       previously_found_committees, scorefct):
    """Build the minimax-Hamming-distance committee model.

    Finds a committee minimizing the maximum Hamming distance to any
    voter's ballot, excluding previously found committees.
    (``scorefct`` is unused here — presumably kept for a uniform
    signature with sibling model builders; verify against callers.)
    """
    max_hamming_distance = model.add_var(
        var_type=mip.INTEGER,
        lb=0,
        ub=profile.num_cand,
        name="max_hamming_distance",
    )
    # the committee has the required size
    model += mip.xsum(in_committee[cand]
                      for cand in profile.candidates) == committeesize
    for voter in profile:
        not_approved = [
            cand for cand in profile.candidates if cand not in voter.approved
        ]
        # maximum Hamming distance is greater or equal than the Hamming distances
        # between individual voters and the committee
        model += max_hamming_distance >= mip.xsum(
            1 - in_committee[cand] for cand in voter.approved) + mip.xsum(
                in_committee[cand] for cand in not_approved)
    # find a new committee that has not been found before
    for committee in previously_found_committees:
        model += mip.xsum(in_committee[cand]
                          for cand in committee) <= committeesize - 1
    # maximizing the negative distance makes code more similar to the other methods here
    model.objective = mip.maximize(-max_hamming_distance)
def mip_optimization(cal_df, y, constrain=3, daily_weights=None):
    """Mixed integer linear programming optimization with constraints.

    Args:
        cal_df (pandas.DataFrame): calendar frame; its index labels the days
            of the returned frame.
        y (numpy.ndarray): sum of daily features (dim=#ofdays)
        constrain (int): bound on weighted selected days (note: the model
            applies it as an upper bound ``<=`` — TODO confirm intent, the
            original docstring said "minimum days in office")
        daily_weights (array): weighting of days, e.g. if you prefer to come
            on mondays

    Return:
        pandas.DataFrame: one 'home_office' column of 0/1 selections,
        indexed like ``cal_df``.
    """
    # daily weighting.  BUGFIX: use `is None` instead of `== None` —
    # comparing a numpy array with `==` is elementwise and makes the
    # conditional expression raise ValueError for array-valued weights.
    u = np.ones(len(y)) if daily_weights is None else daily_weights
    I = range(len(y))  # idx for days for summation
    m = Model("knapsack")  # MIP model
    w = [m.add_var(var_type=BINARY) for i in I]  # weights to optimize
    m.objective = maximize(xsum(y[i] * w[i] for i in I))  # optimization function
    m += xsum(w[i] * u[i] for i in I) <= constrain  # constraint
    m.optimize()
    # selected = [i for i in I if w[i].x >= 0.99]
    selected = [w[i].x for i in I]
    df = pd.DataFrame(columns=["home_office"],
                      index=cal_df.index,
                      data={'home_office': selected})
    return df
def do_matching(graph, visualize=True):
    """Compute a maximum-weight matching on an undirected graph via MIP.

    Args:
        graph: adjacency mapping node -> iterable of (neighbor, weight)
            pairs; keys are coerced to int.
        visualize: when True, draw the graph with networkx before solving.

    Returns:
        Sorted list of matched edges (s, t) with s < t.
    """
    print("Starting model")
    weights = dict()
    graph = {int(key): graph[key] for key in graph}
    E = set()
    V = graph.keys()
    for v in V:
        original = v
        for u, weight in graph[original]:
            # store edges with the smaller endpoint first
            s, t = (u, v) if u < v else (v, u)
            edge = (s, t)
            E.add(edge)
            weights[original, u] = weight
    if visualize:
        graph = nx.Graph()
        graph.add_nodes_from(V)
        graph.add_edges_from(E)
        nx.draw_kamada_kawai(graph)
        plt.show()
    model = Model("Maximum matching")
    edge_vars = {e: model.add_var(var_type=BINARY) for e in E}
    # each vertex is matched at most once
    for v in V:
        model += xsum(edge_vars[s, t] for s, t in E if v in [s, t]) <= 1
    # BUGFIX: the original wrapped this sum in a second xsum over E,
    # which merely multiplied the objective by |E| (same optimum, but
    # O(|E|^2) construction); a single sum over edges suffices.
    # Edge weight is the average of the two directed weights.
    model.objective = maximize(
        xsum(((weights[edge] + weights[edge[1], edge[0]]) / 2) *
             edge_vars[edge] for edge in E))
    model.optimize(max_seconds=300)
    return sorted([e for e in E if edge_vars[e].x > .01])
def ilp(prods, n_prods, alpha):
    """Select products by ILP: S1 is forced in, S2/S3 chosen to maximize
    revenue subject to a quality-difference budget.

    Args:
        prods: product objects with .index, .q (quality), .r (revenue).
        n_prods: total number of products (indexing range).
        alpha: quality threshold used by ``segregate``.

    Returns:
        tuple: (selection vector of 0/1 floats over all products,
        d — accumulated quality surplus of the forced set S1).
    """
    S1, S2, S3, S4 = segregate(prods, n_prods, alpha)
    x = np.array([0.0 for i in range(n_prods)])
    d = 0
    # products in S1 are always selected; accumulate their quality surplus
    for prod in S1:
        x[prod.index] = 1
        d = d + (prod.q - alpha)
    rev = [-1.0 for i in range(n_prods)]
    qdiff = [-1.0 for i in range(n_prods)]
    for prod in S2:
        ind = prod.index
        rev[ind] = prod.r
        qdiff[ind] = prod.q - alpha
    for prod in S3:
        ind = prod.index
        rev[ind] = prod.r
        qdiff[ind] = prod.q - alpha
    m = Model('ilp')
    m.verbose = False
    y = [m.add_var(var_type=BINARY) for i in range(n_prods)]
    m.objective = maximize(xsum(rev[i] * y[i] for i in range(n_prods)))
    # selected products' quality deficit must not exceed S1's surplus d
    m += xsum(qdiff[i] * y[i] for i in range(n_prods)) >= -1 * d
    m.optimize()
    selected = np.array([y[i].x for i in range(n_prods)])
    # BUGFIX: removed leftover `import pdb; pdb.set_trace()` debugging
    # breakpoints that halted execution here.
    # round solver values to exact 0/1, tolerating small numeric noise
    selected = np.floor(selected + 0.01)
    return x + selected, d
def test_knapsack(solver: str):
    """Knapsack test: solve, then re-solve after editing the constraint
    RHS and the objective, checking known optima each time."""
    p = [10, 13, 18, 31, 7, 15]  # profits
    w = [11, 15, 20, 35, 10, 33]  # weights
    c, I = 47, range(len(w))  # capacity and item index range
    m = Model("knapsack", solver_name=solver)
    x = [m.add_var(var_type=BINARY) for i in I]
    m.objective = maximize(xsum(p[i] * x[i] for i in I))
    m += xsum(w[i] * x[i] for i in I) <= c, "cap"
    m.optimize()
    assert m.status == OptimizationStatus.OPTIMAL
    assert round(m.objective_value) == 41
    # relax the capacity by editing the named constraint's right-hand side
    m.constr_by_name("cap").rhs = 60
    m.optimize()
    assert m.status == OptimizationStatus.OPTIMAL
    assert round(m.objective_value) == 51
    # modifying objective function: coefficients of x[0], x[1] should
    # become 10+10=20 and 13+15=28
    m.objective = m.objective + 10 * x[0] + 15 * x[1]
    assert abs(m.objective.expr[x[0]] - 20) <= 1e-10
    assert abs(m.objective.expr[x[1]] - 28) <= 1e-10
def update_model(vrp_model: VrpProblem, model: MyModelMilp, x_var,
                 components_per_vehicle, edges_in_customers,
                 edges_out_customers):
    """Add subtour-elimination cuts for each disconnected component.

    For every vehicle with more than one connected component, forces at
    least one edge to enter and one to leave each component's node set.
    """
    for vehicle in components_per_vehicle:
        comps = components_per_vehicle[vehicle]
        print("Updating model : Nb component : ", len(comps))
        if len(comps) > 1:
            for si in comps:
                # s[0] = the component's customer-node set, s[1] = its tag
                s = (set([x[1] for x in si[0]]), si[1])
                print(vehicle, ":", s)
                # edges crossing into the component from outside
                edge_in_of_interest = [
                    e for n in s[0] for e in edges_in_customers[n]
                    if e[0][1] not in s[0]
                ]
                # edges leaving the component to outside
                edge_out_of_interest = [
                    e for n in s[0] for e in edges_out_customers[n]
                    if e[1][1] not in s[0]
                ]
                print(edge_in_of_interest)
                print("Len of interest : ", len(edge_out_of_interest),
                      len(edge_in_of_interest))
                model.add_constr(
                    xsum([x_var[e] for e in edge_in_of_interest]) >= 1)
                model.add_constr(
                    xsum([x_var[e] for e in edge_out_of_interest]) >= 1)
    # flush the new constraints into the underlying solver
    # NOTE(review): reconstructed at function level (run once after all
    # cuts are added) — confirm against the original formatting.
    model.update()
def build(self, min_obj_value=None, max_n_solutions=None):
    """Build the TSP-style model: variables, constraints, objective, and
    subtour cut generator.

    Args:
        min_obj_value: if given, add a lower bound on the tour weight.
        max_n_solutions: accepted for interface compatibility (unused here).

    Returns:
        self, for chaining.
    """
    self.V = set(self.graph.nodes())
    self.n = len(self.V)
    seed(0)
    self.Arcs = self.graph.edges()
    self.add_variables()
    self.add_constraints()
    if min_obj_value is not None:
        # BUGFIX: python-mip's LinExpr does not implement strict '>'
        # (and strict inequalities are not expressible in LP); use '>='.
        constr = xsum(self.graph[i][j]['weight'] * self.x[(i, j)]
                      for (i, j) in self.Arcs) >= min_obj_value
        self.model += constr
    # objective function: minimize the distance
    self.model.objective = minimize(
        xsum(self.graph[i][j]['weight'] * self.x[(i, j)]
             for (i, j) in self.Arcs))
    self.F = self.get_farthest_point_list()
    self.model.cuts_generator = SubTourCutGenerator(
        self.F, self.x, self.V, self.graph)
    return self
def find_optimal_pairs(N, weights) -> list[tuple[int, int]]:
    """
    find_optimal_pairs finds an optimal set of pairs of integers between 0
    and N-1 (incl) that minimize the sum of the weights specified for each
    pair.

    Args:
        N: number of people; must be even for the model to be feasible,
           since every person is paired exactly once.
        weights: callable (i, j) -> cost of pairing i with j.

    Raises:
        Exception: if the solver does not reach a proven optimum.
    """
    pairs = [(i, j) for i in range(N - 1) for j in range(i + 1, N)]

    def pairs_containing(k):
        # all pairs (i, k) with i < k, plus (k, i) with i > k
        return chain(((i, k) for i in range(k)),
                     ((k, i) for i in range(k + 1, N)))

    m = mip.Model()
    p = {(i, j): m.add_var(var_type=mip.BINARY) for i, j in pairs}
    # Constraint: a person can only be in one pair, so sum of all pairs with person k must be 1
    for k in range(N):
        m += mip.xsum(p[i, j] for i, j in pairs_containing(k)) == 1
    m.objective = mip.minimize(
        mip.xsum(weights(i, j) * p[i, j] for i, j in pairs))
    m.verbose = False
    status = m.optimize()
    if status != mip.OptimizationStatus.OPTIMAL:
        # BUGFIX: `"not optimal" + status` raised TypeError (cannot
        # concatenate str and OptimizationStatus); format instead.
        raise Exception(f"not optimal {status}")
    print("Objective value =", m.objective_value)
    return [(i, j) for i, j in pairs if p[i, j].x > 0.5]
def __init__(self, facilities: List[Facility], customers: List[Customer]):
    """Build the naive facility-location MIP.

    Assignment variables are binary per (customer, facility); each
    customer is assigned to exactly one facility, facility capacities are
    respected, and the objective sums setup costs plus assignment
    distances.
    """
    self.m = Model('NaiveFacilityMIP', solver_name=CBC)
    # customer_facility_map[c][f] = 1 iff customer c is served by facility f
    self.customer_facility_map = [[self.m.add_var(var_type=BINARY)
                                   for facility in range(len(facilities))]
                                  for customer in range(len(customers))]
    # transposed view: facility_customer_map[f][c] shares the same vars
    self.facility_customer_map = list(zip(*self.customer_facility_map))
    # each customer is assigned to exactly one facility
    for customer in range(len(customers)):
        self.m.add_constr(xsum(self.customer_facility_map[customer]) == 1)
    # assigned demand must fit within each facility's capacity
    for facility in range(len(facilities)):
        self.m.add_constr(
            xsum([customers[index].demand * variable
                  for index, variable in
                  enumerate(self.facility_customer_map[facility])])
            <= facilities[facility].capacity)
    # facility_enabled[f] = 1 iff any customer is assigned to facility f
    facility_enabled = [self.m.add_var(var_type=BINARY)
                        for facility in range(len(facilities))]
    for facility in range(len(facilities)):
        for customer in range(len(customers)):
            self.m.add_constr(
                self.facility_customer_map[facility][customer]
                <= facility_enabled[facility])
    # objective: setup costs of opened facilities + assignment distances
    # (euclideam_length is an external helper — name includes the typo)
    self.m.objective = xsum([facilities[f].setup_cost * facility_enabled[f]
                             for f in range(len(facilities))]) + \
        xsum([euclideam_length(facilities[facility].location,
                               customers[customer].location) *
              self.facility_customer_map[facility][customer]
              for facility in range(len(facilities))
              for customer in range(len(customers))])
    self.m.verbose = True
    # cap the solve at 20 minutes
    self.m.max_seconds = 60 * 20
def find_optimal_pairs(N, weights) -> (float, list[tuple[int, int]]):
    """
    find_optimal_pairs finds an optimal set of pairs of integers between 0
    and N-1 (incl) that minimize the sum of the weights specified for each
    pair.  Returns the objective value and list of pairs.

    Unlike the strict-pairs variant, self-pairs (i, i) are allowed:
    a person "paired with themselves" sits out the round.
    """
    pairs = [(i, j) for i in range(N) for j in range(i, N)]
    # note: people are excluded from the round by pairing with themselves

    def pairs_containing(k):
        # all pairs (i, k) with i < k plus (k, i) with i >= k, including (k, k)
        return chain(((i, k) for i in range(k)),
                     ((k, i) for i in range(k, N)))

    m = mip.Model()
    p = {(i, j): m.add_var(var_type=mip.BINARY) for i, j in pairs}
    # Constraint: a person can only be in one pair, so sum of all pairs with person k must be 1
    for k in range(N):
        m += mip.xsum(p[i, j] for i, j in pairs_containing(k)) == 1
    # here `weights` is subscriptable, e.g. a dict or 2-D array
    m.objective = mip.minimize(
        mip.xsum(weights[i, j] * p[i, j] for i, j in pairs))
    m.verbose = False
    status = m.optimize()
    if status != mip.OptimizationStatus.OPTIMAL:
        raise Exception("not optimal")
    return m.objective_value, [(i, j) for i, j in pairs if p[i, j].x > 0.5]
def build_model(self, solver):
    """MIP model n-queens: build the 50x50 board model and return it."""
    n = 50
    queens = Model('queens', MAXIMIZE, solver_name=solver)
    queens.verbose = 0

    # one binary variable per board cell
    x = [[queens.add_var('x({},{})'.format(r, c), var_type=BINARY)
          for c in range(n)] for r in range(n)]

    # exactly one queen per row
    for r in range(n):
        queens += xsum(x[r]) == 1, 'row{}'.format(r)

    # exactly one queen per column
    for c in range(n):
        queens += xsum(row[c] for row in x) == 1, 'col{}'.format(c)

    # at most one queen per \-diagonal (constant r - c)
    for p, k in enumerate(range(2 - n, n - 2 + 1)):
        diag = xsum(x[r][c] for r in range(n) for c in range(n) if r - c == k)
        queens += diag <= 1, 'diag1({})'.format(p)

    # at most one queen per /-diagonal (constant r + c)
    for p, k in enumerate(range(3, n + n)):
        anti = xsum(x[r][c] for r in range(n) for c in range(n) if r + c == k)
        queens += anti <= 1, 'diag2({})'.format(p)

    return n, queens
def _add_area_constraints(
    self,
    m: Model,
    v2var: Dict[str, Var],
    direction: Dir,
    max_usage_ratio: float,
) -> None:
    """Constrain per-resource area usage of both child slots.

    For each current slot, splits it in half along ``direction`` and, for
    every resource type, bounds the area assigned to each half by that
    half's capacity scaled by ``max_usage_ratio``.  A vertex's binary var
    being 1 places it in the up/right half, 0 in the bottom/left half.
    """
    for s, v_group in self.curr_s2v.items():
        bottom_or_left, up_or_right = self.slot_manager.partitionSlotByHalf(
            s, direction)
        for r in RESOURCE_TYPES:
            v_var_list = [v2var[v] for v in v_group]
            # per-vertex area demand for resource r (vertex + inbound FIFOs)
            area_list = [
                v.getVertexAndInboundFIFOArea()[r] for v in v_group
            ]
            I = range(len(v_group))
            # for the up/right child slot (if mod_x is assigned 1)
            m += xsum(
                v_var_list[i] * area_list[i]
                for i in I) <= up_or_right.getArea()[r] * max_usage_ratio
            # for the down/left child slot (if mod_x is assigned 0)
            m += xsum((1 - v_var_list[i]) * area_list[i] for i in
                      I) <= bottom_or_left.getArea()[r] * max_usage_ratio
def estimate_cp(bal_res, de_ads):
    """Estimate integer copy numbers (major M, minor m) per abnormal row.

    Fits M and m so that M*hap_dp and m*hap_dp approximate the max/min
    allele depths, minimizing the sum of absolute residuals via auxiliary
    error variables (L1 regression as a MIP).

    Returns:
        bal_res with added 'cp' (=M+m), 'M' and 'm' columns.
    """
    df_normal = bal_res.loc[lambda row: row.dp_phased == False]
    df_abnormal = bal_res.loc[lambda row: row.dp_phased == True]
    # NOTE(review): ads_abnormal is computed but never used below
    ads_abnormal = de_ads[df_abnormal.index]
    M_, m_ = np.full(len(bal_res), np.nan), np.full(len(bal_res), np.nan)
    # haploid depth estimated from the normal (unphased) rows
    hap_dp = df_normal.avg_depths.mean() / 2
    for row in df_abnormal.itertuples():
        ads = de_ads[row.Index]
        mod = mip.Model()
        M, m = mod.add_var(var_type=mip.INTEGER,
                           lb=1), mod.add_var(var_type=mip.INTEGER, lb=1)
        # absolute-error slack variables for the major/minor fits
        eM = [mod.add_var() for i in range(len(ads))]
        em = [mod.add_var() for i in range(len(ads))]
        for i in range(len(ads)):
            # |ads[i].max() - M*hap_dp| <= eM[i]
            mod += ads[i].max() - M * hap_dp >= -eM[i]
            mod += ads[i].max() - M * hap_dp <= eM[i]
            # |ads[i].min() - m*hap_dp| <= em[i]
            mod += ads[i].min() - m * hap_dp >= -em[i]
            mod += ads[i].min() - m * hap_dp <= em[i]
        # major copy number dominates the minor one
        mod += M >= m
        mod.objective = mip.xsum(eM[i] for i in range(len(ads))) + mip.xsum(
            em[i] for i in range(len(ads)))
        mod.optimize()
        M_[row.Index] = M.x
        m_[row.Index] = m.x
    res = pd.concat(
        [bal_res, pd.DataFrame({
            'cp': M_ + m_,
            'M': M_,
            'm': m_
        })], axis=1)
    return res
def _compute_integer_image_sizes(image_sizes: List[Size],
                                 layout: Layout) -> List[Size]:
    """Round image sizes to integers satisfying the layout's constraints.

    Solves a MIP: integer widths/heights subject to the layout's linear
    equality constraints, minimizing the total absolute deviation from
    each image's original aspect ratio.
    """
    import mip
    constraints = layout.get_constraints(image_sizes)
    aspect_ratios = [h / w for w, h in image_sizes]

    # set up a mixed-integer program, and solve it
    n_images = len(image_sizes)
    m = mip.Model()
    var_widths = [m.add_var(var_type=mip.INTEGER) for _ in range(n_images)]
    var_heights = [m.add_var(var_type=mip.INTEGER) for _ in range(n_images)]
    # each layout constraint is a signed sum of widths or heights that
    # must equal c.result
    for c in constraints:
        if c.is_height:
            vars = ([var_heights[i] for i in c.positive_ids] +
                    [-var_heights[i] for i in c.negative_ids])
        else:
            vars = ([var_widths[i] for i in c.positive_ids] +
                    [-var_widths[i] for i in c.negative_ids])
        m.add_constr(mip.xsum(vars) == c.result)

    # the errors come from a deviation in aspect ratio
    var_errs = [m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)]
    for err, w, h, ar in zip(var_errs, var_widths, var_heights,
                             aspect_ratios):
        m.add_constr(err == h - w * ar)

    # To minimise error, we need to create a convex cost function. Common
    # options are either abs(err) or err ** 2. However, both these functions are
    # non-linear, so cannot be directly computed in MIP. We can represent abs
    # exactly with a type-1 SOS, and approximate ** 2 with a type-2 SOS. Here we
    # use abs.
    var_errs_pos = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    var_errs_neg = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    var_abs_errs = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    for abs_err, err, err_pos, err_neg in zip(var_abs_errs, var_errs,
                                              var_errs_pos, var_errs_neg):
        # err_pos and err_neg are both positive representing each side of the
        # abs function. Only one will be non-zero (SOS Type-1).
        m.add_constr(err == err_pos - err_neg)
        m.add_constr(abs_err == err_pos + err_neg)
        m.add_sos([(err_pos, 1), (err_neg, -1)], sos_type=1)

    m.objective = mip.minimize(mip.xsum(var_abs_errs))
    m.optimize(max_seconds=30)
    new_sizes = [
        Size(int(w.x), int(h.x)) for w, h in zip(var_widths, var_heights)
    ]
    return new_sizes
def implement_less_than(r1: int, c1: int, r2: int, c2: int) -> None: """ Implements the constraint value[r1, c1] < value[r2, c2]. """ nonlocal m m += (xsum(x[r1][c1][d] * d for d in range(n)) <= xsum(x[r2][c2][d] * d for d in range(n)) - 1)
def init_model(self, **kwargs):
    """Build the facility-location MIP for this solver instance.

    Optionally prunes the assignment search space with a heuristic that
    keeps only the cheapest/shortest facility-customer pairs; pruned
    pairs become the constants 0 (forbidden) or 1 (forced).

    Kwargs:
        use_matrix_indicator_heuristic (bool): enable pruning (default True).
        n_shortest / n_cheapest (int): pruning breadth (default 10 each).
    """
    nb_facilities = self.facility_problem.facility_count
    nb_customers = self.facility_problem.customer_count
    use_matrix_indicator_heuristic = kwargs.get(
        "use_matrix_indicator_heuristic", True)
    if use_matrix_indicator_heuristic:
        n_shortest = kwargs.get("n_shortest", 10)
        n_cheapest = kwargs.get("n_cheapest", 10)
        matrix_fc_indicator, matrix_length = prune_search_space(
            self.facility_problem, n_cheapest=n_cheapest,
            n_shortest=n_shortest)
    else:
        matrix_fc_indicator, matrix_length = prune_search_space(
            self.facility_problem, n_cheapest=nb_facilities,
            n_shortest=nb_facilities)
    s = mip.Model(name="facilities", sense=mip.MINIMIZE,
                  solver_name=self.solver_name)
    x = {}
    # indicator 0 -> forbidden (constant 0), 1 -> forced (constant 1),
    # 2 -> free binary decision variable
    for f in range(nb_facilities):
        for c in range(nb_customers):
            if matrix_fc_indicator[f, c] == 0:
                x[f, c] = 0
            elif matrix_fc_indicator[f, c] == 1:
                x[f, c] = 1
            elif matrix_fc_indicator[f, c] == 2:
                x[f, c] = s.add_var(var_type=mip.BINARY, obj=0,
                                    name="x_" + str((f, c)))
    facilities = self.facility_problem.facilities
    customers = self.facility_problem.customers
    # BUGFIX: the original used GRB.BINARY (a gurobipy constant) inside a
    # python-mip model; use mip.BINARY so no Gurobi import is required.
    used = s.add_var_tensor((nb_facilities, 1), var_type=mip.BINARY,
                            name="y")
    constraints_customer = {}
    # each customer is served by exactly one facility
    for c in range(nb_customers):
        constraints_customer[c] = s.add_constr(
            mip.xsum([x[f, c] for f in range(nb_facilities)]) == 1)
    constraint_capacity = {}
    for f in range(nb_facilities):
        # a facility is marked used if it serves any customer
        for c in range(nb_customers):
            s.add_constr(used[f, 0] >= x[f, c])
        # total served demand must fit within the facility's capacity
        constraint_capacity[f] = s.add_constr(
            mip.xsum([x[f, c] * customers[c].demand
                      for c in range(nb_customers)])
            <= facilities[f].capacity)
    # objective: setup costs of used facilities + assignment distances
    new_obj_f = mip.LinExpr(const=0.)
    new_obj_f.add_expr(mip.xsum([facilities[f].setup_cost * used[f, 0]
                                 for f in range(nb_facilities)]))
    new_obj_f.add_expr(mip.xsum([matrix_length[f, c] * x[f, c]
                                 for f in range(nb_facilities)
                                 for c in range(nb_customers)]))
    s.objective = new_obj_f
    self.model = s
    self.variable_decision = {"x": x}
    self.constraints_dict = {"constraint_customer": constraints_customer,
                             "constraint_capacity": constraint_capacity}
    self.description_variable_description = {
        "x": {"shape": (nb_facilities, nb_customers),
              "type": bool,
              "descr": "for each facility/customer indicate"
                       " if the pair is active, meaning "
                       "that the customer c is dealt with facility f"}}
    self.description_constraint = {"Im lazy."}
    print("Initialized")
def mip_model(self, representation, labeled_idx, budget, delta, outlier_count, greedy_indices=None): model = mip.Model("Core Set Selection") # set up the variables: points = {} outliers = {} feasible_start = [] for i in range(representation.shape[0]): if i in labeled_idx: points[i] = model.add_var(ub=1.0, lb=1.0, var_type=BINARY, name="points_{}".format(i)) else: points[i] = model.add_var(var_type=BINARY, name="points_{}".format(i)) for i in range(representation.shape[0]): name = "outliers_{}".format(i) outliers[i] = model.add_var(var_type=BINARY, name=name) # outliers[i].start = 0 feasible_start.append((outliers[i], 0.0)) # initialize the solution to be the greedy solution: if greedy_indices is not None: for i in greedy_indices: # points[i].start = 1.0 # gurobi feasible_start.append((points[i], 1)) model.start = feasible_start # set the outlier budget: model.add_constr(xsum(outliers[i] for i in outliers) <= outlier_count, "budget") # build the graph and set the constraints: model.add_constr(xsum(points[i] for i in range(representation.shape[0])) == budget, "budget") neighbors = {} graph = {} print("Updating Neighborhoods In MIP Model...") for i in range(0, representation.shape[0], 1000): print("At Point " + str(i)) if i + 1000 > representation.shape[0]: distances = self.get_distance_matrix(representation[i:], representation) amount = representation.shape[0] - i else: distances = self.get_distance_matrix(representation[i:i + 1000], representation) amount = 1000 distances = np.reshape(distances, (amount, -1)) for j in range(i, i + amount): graph[j] = [(idx, distances[j - i, idx]) for idx in np.reshape(np.where(distances[j - i, :] <= delta), (-1))] neighbors[j] = [points[idx] for idx in np.reshape(np.where(distances[j - i, :] <= delta), (-1))] neighbors[j].append(outliers[j]) model.add_constr(xsum(neighbors[j]) >= 1, "coverage+outliers") model.__data = points, outliers model.emphasis = 1 model.threads = -1 # model.max_seconds = self.max_seconds return model, graph
def _create_constraints_to_balance_unit_nodes(self):
    """Add flow-balance constraints for every unit and time step.

    For each unit node: inflows minus outflows equal zero; units with
    commitment variables additionally subtract their minimum loading when
    committed (state * min_loading).
    """
    for unit in self.units:
        for i in range(0, self.planning_horizon):
            in_flow_vars = [var for var_name, var in
                            self.unit_in_flow_variables[unit][i].items()]
            out_flow_vars = [var for var_name, var in
                             self.unit_out_flow_variables[unit][i].items()]
            if unit in self.unit_commitment_vars:
                # committed units must carry at least their minimum loading
                min_loading_var = [
                    self.unit_commitment_vars[unit]['state'][i] *
                    self.unit_min_loading[unit] * -1
                ]
                self.model += xsum(in_flow_vars +
                                   [-1 * var for var in out_flow_vars] +
                                   min_loading_var) == 0.0
            else:
                self.model += xsum(in_flow_vars +
                                   [-1 * var for var in out_flow_vars]) == 0.0
def add_sbox(model, Q_t, xs, ys=None):
    """Link the sbox activity indicator Q_t to its input/output bits.

    Q_t is forced to 1 exactly when some input bit in `xs` is non-zero;
    when `ys` is supplied, any non-zero output additionally requires a
    non-zero input.
    """
    # non-zero input `xs` active the corresponding sbox
    model += mip.xsum(xs) >= Q_t
    # ... and each input bit alone is enough to force Q_t on
    for bit in xs:
        model += Q_t >= bit

    if ys is not None:
        # Because impossible constraints of SBox is conditional,
        # we need additional constraint for non-zero output `ys`
        # resulting in non-zero input `xs`
        model += len(ys) * mip.xsum(xs) >= mip.xsum(ys)
def knapsack():
    """Solve the classic 6-item 0/1 knapsack toy instance and print the
    resulting optimization status."""
    profits = [10, 13, 18, 31, 7, 15]
    costs = [11, 15, 20, 35, 10, 33]
    capacity = 47
    items = range(len(costs))

    model = Model("knapsack")
    take = [model.add_var(var_type=BINARY) for _ in items]

    # maximize total profit subject to the capacity limit
    model.objective = maximize(xsum(profits[i] * take[i] for i in items))
    model += xsum(costs[i] * take[i] for i in items) <= capacity

    print(model.optimize())
def do_matching_stable(graph, visualize=True, individual=1, communal=10000000):
    """Maximum-weight matching that heavily penalizes "rogue couples".

    A rogue couple (u, v) is an unmatched-or-mismatched pair where both
    would prefer each other to their current partners (lower weight =
    more preferred here, per the `<` comparisons below).  The objective
    trades off individual edge weight against the communal penalty.

    Returns:
        Sorted list of matched edges (s, t) with s < t.
    """
    print("Starting model")
    weights = dict()
    graph = {int(key): graph[key] for key in graph}
    E = set()
    V = graph.keys()
    # directed preference lists: outputs[v] = nodes v points to
    inputs = {v: [] for v in V}
    outputs = {v: [] for v in V}
    for v in V:
        original = v
        for u, weight in graph[original]:
            # undirected edge stored with the smaller endpoint first
            s, t = (u, v) if u < v else (v, u)
            edge = (s, t)
            E.add(edge)
            weights[(original, u)] = weight
            outputs[original].append(u)
            inputs[u].append(original)
    if visualize:
        graph = nx.Graph()
        graph.add_nodes_from(V)
        graph.add_edges_from(E)
        nx.draw_kamada_kawai(graph)
        plt.show()
    model = Model("Rogue Couples based")
    edge_vars = {e: model.add_var(var_type=BINARY) for e in E}
    # view of edge vars addressable from either direction
    undirected = dict()
    for e in E:
        undirected[e] = edge_vars[e]
        undirected[e[1], e[0]] = edge_vars[e]
    rogue_vars = {e: model.add_var(var_type=BINARY) for e in E}
    partners = dict()
    for v in V:
        # NOTE(review): the add_var() result is immediately overwritten by
        # the xsum expression — the created variable is unused; confirm
        # whether it was meant to be constrained equal to the sum instead.
        partners[v] = model.add_var()
        partners[v] = xsum(edge_vars[s, t] for s, t in E if v in [s, t])
        model += partners[v] <= 1
    for (u, v), rogue_var in rogue_vars.items():
        # candidates each endpoint strictly prefers over the other
        v_primes = [
            vp for vp in outputs[u] if weights[(u, vp)] < weights[(u, v)]
        ]
        u_primes = [
            up for up in outputs[v] if weights[(v, up)] < weights[(v, u)]
        ]
        # if neither u nor v is matched better, (u, v) becomes rogue
        model += 1 - partners[v] - partners[u] + xsum(
            undirected[u, vp] for vp in v_primes) + xsum(undirected[up, v]
                                                         for up in u_primes) <= rogue_var
    model.objective = maximize(individual * xsum(
        ((weights[edge] + weights[edge[1], edge[0]]) / 2) * edge_vars[edge]
        for edge in E) - communal * xsum(rogue_vars[edge] for edge in E))
    model.optimize(max_seconds=300)
    return sorted([e for e in E if edge_vars[e].x > .01])
def test_queens(solver: str):
    """MIP model n-queens"""
    n = 50
    queens = Model("queens", MAXIMIZE, solver_name=solver)
    queens.verbose = 0
    # x[i][j] = 1 iff a queen sits at row i, column j
    x = [[
        queens.add_var("x({},{})".format(i, j), var_type=BINARY)
        for j in range(n)
    ] for i in range(n)]
    # one per row
    for i in range(n):
        queens += xsum(x[i][j] for j in range(n)) == 1, "row({})".format(i)
    # one per column
    for j in range(n):
        queens += xsum(x[i][j] for i in range(n)) == 1, "col({})".format(j)
    # diagonal \  (cells with constant i - j)
    for p, k in enumerate(range(2 - n, n - 2 + 1)):
        queens += (
            xsum(x[i][j] for i in range(n) for j in range(n)
                 if i - j == k) <= 1,
            "diag1({})".format(p),
        )
    # diagonal /  (cells with constant i + j)
    for p, k in enumerate(range(3, n + n)):
        queens += (
            xsum(x[i][j] for i in range(n) for j in range(n)
                 if i + j == k) <= 1,
            "diag2({})".format(p),
        )
    queens.optimize()
    assert queens.status == OptimizationStatus.OPTIMAL  # "model status"
    # querying problem variables and checking opt results
    total_queens = 0
    for v in queens.vars:
        # basic integrality test
        assert v.x <= TOL or v.x >= 1 - TOL
        total_queens += v.x
    # solution feasibility
    rows_with_queens = 0
    for i in range(n):
        if abs(sum(x[i][j].x for j in range(n)) - 1) <= TOL:
            rows_with_queens += 1
    assert abs(total_queens - n) <= TOL  # "feasible solution"
    assert rows_with_queens == n  # "feasible solution"
def possible(c, n, history):
    """Find an assignment of n items to c categories consistent with history.

    Each history entry is (tab, sc): a previous assignment `tab` together
    with the number `sc` of positions it got right.  Returns one feasible
    assignment as a list of category indices.
    """
    model = Model()
    model.verbose = 0

    # assign[i][j] = 1 iff item i is placed in category j
    assign = [[model.add_var(var_type=BINARY) for _ in range(c)]
              for _ in range(n)]

    # every item belongs to exactly one category
    for row in assign:
        model += xsum(row) == 1

    # each past guess must match in exactly `sc` positions
    for tab, sc in history:
        model += xsum(assign[i][tab[i]] for i in range(n)) == sc

    model.optimize()

    # read off the chosen category per item (argmax over solution values)
    return [max([(assign[i][j].x, j) for j in range(c)])[1]
            for i in range(n)]
def add_impos(self, pattern, X, Y):
    """ remove impossible case for a given pattern

    Adds a no-good cut: at least one binary in X/Y must differ from the
    values the pattern fixes.  For each bit, pattern.get(...) defaults are
    chosen so unconstrained bits contribute 0 to the sum.
    """
    sz_i = len(X)
    sz_o = len(Y)
    # X[i]*(1 - fixed_to_1) + (1 - X[i])*fixed_to_0 is 1 exactly when X[i]
    # deviates from the pattern's fixed value; same for Y[j]
    self += xsum(X[i] * (1 - pattern.get(f"X{i}", 1)) +
                 (1 - X[i]) * pattern.get(f"X{i}", 0)
                 for i in range(sz_i)) + xsum(
                     Y[j] * (1 - pattern.get(f"Y{j}", 1)) +
                     (1 - Y[j]) * pattern.get(f"Y{j}", 0)
                     for j in range(sz_o)) >= 1