def optimal_split(self, ratio=0.5):
    """Function that returns the optimal split for a certain ratio of the potential (default to 0.5)

    Keyword Arguments:
        ratio {float} -- The ratio of the potential needed (default: {0.5})

    Returns:
        list tuple -- Returns the tuple (A, B) representing the partitions.
    """
    # Trivial case: exactly one token left -- give it to either side with a coin flip.
    if (sum(self.game_state) == 1):
        if (randint(1, 100) <= 50):
            return self.game_state, [0] * (self.K + 1)
        else:
            return [0] * (self.K + 1), self.game_state
    else:
        # General case: integer program choosing x[i] <= game_state[i] tokens
        # per level so that partition A's potential reaches ratio * total.
        m = Model("")
        x = [m.add_var(var_type=INTEGER) for i in self.game_state]
        # Level i carries weight 2^-(K-i); the subtracted term is a constant,
        # so this effectively minimises A's potential above the threshold.
        m.objective = minimize(
            sum([
                2**(-(self.K - i)) * c for c, i in zip(x, range(self.K + 1))
            ]) - ratio * self.potential(self.game_state))
        # Each pick is bounded by the tokens available at its level.
        for i in range(len(x)):
            m += 0 <= x[i]
            m += x[i] <= self.game_state[i]
        # A must reach at least the requested share of the total potential.
        m += sum([
            2**(-(self.K - i)) * c for c, i in zip(x, range(self.K + 1))
        ]) >= ratio * self.potential(self.game_state)
        m.optimize()
        Abis = [0] * (self.K + 1)
        for i in range(len(x)):
            Abis[i] = int(x[i].x)
        # B receives whatever A did not take, level by level.
        B = [z - a for z, a in zip(self.game_state, Abis)]
        return Abis, B
def build(self, min_obj_value=None, max_n_solutions=None):
    """Build the TSP model: variables, constraints, an optional lower bound on
    the tour length, the minimise-distance objective, and the sub-tour cut
    generator.

    :param min_obj_value: if given, force the tour length to be at least this
        value (used when enumerating solutions above a known bound).
    :param max_n_solutions: accepted for interface compatibility; not used in
        this method.
    :return: self, to allow call chaining.
    """
    self.V = set(self.graph.nodes())
    self.n = len(self.V)
    seed(0)
    self.Arcs = self.graph.edges()
    self.add_variables()
    self.add_constraints()
    if min_obj_value is not None:
        # BUG FIX: Python-MIP linear expressions only implement <=, >= and ==;
        # the original strict '>' raised a TypeError (and strict inequalities
        # are not expressible in a MIP anyway), so use '>=' instead.
        constr = xsum(self.graph[i][j]['weight'] * self.x[(i, j)]
                      for (i, j) in self.Arcs) >= min_obj_value
        self.model += constr
    # objective function: minimize the distance
    self.model.objective = minimize(
        xsum(self.graph[i][j]['weight'] * self.x[(i, j)]
             for (i, j) in self.Arcs))
    self.F = self.get_farthest_point_list()
    self.model.cuts_generator = SubTourCutGenerator(
        self.F, self.x, self.V, self.graph)
    return self
def find_optimal_pairs(N, weights) -> (float, list[tuple[int, int]]):
    """
    find_optimal_pairs finds an optimal set of pairs of integers between 0 and
    N-1 (incl) that minimize the sum of the weights specified for each pair.
    Returns the objective value and list of pairs.
    """
    # Self-pairs (k, k) are included: pairing with yourself means sitting the
    # round out.
    pairs = [(i, j) for i in range(N) for j in range(i, N)]

    def pairs_containing(k):
        as_second = ((i, k) for i in range(k))
        as_first = ((k, i) for i in range(k, N))
        return chain(as_second, as_first)

    model = mip.Model()
    use = {pair: model.add_var(var_type=mip.BINARY) for pair in pairs}

    # Every person must appear in exactly one selected pair.
    for person in range(N):
        model += mip.xsum(use[pair] for pair in pairs_containing(person)) == 1

    model.objective = mip.minimize(
        mip.xsum(weights[pair] * use[pair] for pair in pairs))
    model.verbose = False

    if model.optimize() != mip.OptimizationStatus.OPTIMAL:
        raise Exception("not optimal")
    chosen = [pair for pair in pairs if use[pair].x > 0.5]
    return model.objective_value, chosen
def set_objective(self, ingredients):
    """
    The objective function: minimise how many of the given ingredients are
    selected.

    :param ingredients: The ingredients for the cocktail
    """
    selected = (self.x[i] for i in ingredients)
    self.model.objective = mip.minimize(mip.xsum(selected))
def find_optimal_pairs(N, weights) -> list[tuple[int, int]]:
    """
    find_optimal_pairs finds an optimal set of pairs of integers between 0 and
    N-1 (incl) that minimize the sum of the weights specified for each pair.

    :param N: number of people; every person must be paired with someone else
        (no self-pairs), so the model is infeasible for odd N and raises.
    :param weights: callable (i, j) -> cost of pairing i with j, with i < j.
    :return: the chosen pairs as (i, j) tuples with i < j.
    :raises Exception: when the solver does not reach a proven optimum.
    """
    # Strictly i < j: unlike the self-pair variant, nobody can sit out.
    pairs = [(i, j) for i in range(N - 1) for j in range(i + 1, N)]

    def pairs_containing(k):
        return chain(((i, k) for i in range(k)), ((k, i) for i in range(k + 1, N)))

    m = mip.Model()
    p = {(i, j): m.add_var(var_type=mip.BINARY) for i, j in pairs}
    # Constraint: a person can only be in one pair, so sum of all pairs with person k must be 1
    for k in range(N):
        m += mip.xsum(p[i, j] for i, j in pairs_containing(k)) == 1
    m.objective = mip.minimize(
        mip.xsum(weights(i, j) * p[i, j] for i, j in pairs))
    m.verbose = False
    status = m.optimize()
    if status != mip.OptimizationStatus.OPTIMAL:
        # BUG FIX: '"not optimal" + status' raised a TypeError (str + enum);
        # format the status into the message instead.
        raise Exception(f"not optimal: {status}")
    print("Objective value =", m.objective_value)
    return [(i, j) for i, j in pairs if p[i, j].x > 0.5]
def _add_opt_goal(
    m: Model,
    v_list: List[Vertex],
    v2var_x: Dict[Vertex, Var],
    v2var_y1: Dict[Vertex, Var],
    v2var_y2: Dict[Vertex, Var],
) -> None:
    """Add the wire-length objective: minimise the width-weighted Manhattan
    distance between the endpoints of every edge."""
    # add optimization goal
    all_edges = get_all_edges(v_list)
    # one auxiliary INTEGER variable per edge; the constraints below make it an
    # upper bound on |dy| + |dx|, and minimisation makes it tight
    e2cost_var = {e: m.add_var(var_type=INTEGER, name=f'intra_{e.name}') for e in all_edges}

    # note pos is different from slot_idx, because the x dimension is different from the y dimension
    # we will use |(y1 * 2 + y2) - (y1' * 2 + y2')| + |x - x'| to express the hamming distance
    pos_y = lambda v : v2var_y1[v] * 2 + v2var_y2[v]
    pos_x = lambda v : v2var_x[v]
    cost_y = lambda e : pos_y(e.src) - pos_y(e.dst)
    cost_x = lambda e : pos_x(e.src) - pos_x(e.dst)
    # linearise |dy| + |dx| with the four sign combinations:
    # cost >= ±dy ± dx  ==>  cost >= |dy| + |dx|
    for e, cost_var in e2cost_var.items():
        m += cost_var >= cost_y(e) + cost_x(e)
        m += cost_var >= -cost_y(e) + cost_x(e)
        m += cost_var >= cost_y(e) - cost_x(e)
        m += cost_var >= -cost_y(e) - cost_x(e)

    # weight each edge's distance by its bit width
    m.objective = minimize(xsum(cost_var * e.width for e, cost_var in e2cost_var.items() ) )
def min_unproportionality_allocation(utilities: Dict[int, List[float]], m: Model) -> None:
    """
    Computes (one of) the item allocation(s) which minimizes global
    unproportionality; only positive unproportionality contributes, enforced
    by the >= 0 floor on the per-agent dummy variables.

    :param utilities: the dictionary representing the utility profile, where
        each key is an agent and its value an array of floats such that the
        i-th float is the utility of the i-th item for the key-agent.
    :param m: the MIP model to optimize; it must already contain the binary
        'assign_{item}_{agent}' variables. The model is optimized in place.
    """
    n_agents = len(utilities)
    n_items = len(list(utilities.values())[0])

    # One non-negative dummy per agent measuring how far below her
    # proportional share (total utility / n_agents) her bundle falls.
    dummies = [
        m.add_var(name='dummy_{}'.format(agent), var_type=CONTINUOUS)
        for agent in range(n_agents)
    ]
    m.objective = minimize(xsum(dummies))

    for agent in range(n_agents):
        proportional_share = sum(utilities[agent][item]
                                 for item in range(n_items)) / n_agents
        bundle_utility = sum(
            utilities[agent][item] * m.var_by_name('assign_{}_{}'.format(item, agent))
            for item in range(n_items))
        m += dummies[agent] >= 0
        m += dummies[agent] >= proportional_share - bundle_utility

    m.optimize()
def _add_opt_goal(self, m: Model, v2var: Dict[str, Var], direction: Dir) -> None:
    """ minimize the weighted sum over all edges """
    edge_list = get_all_edges(list(self.curr_v2s.keys()))
    # one INTEGER cost variable per edge; the constraints below make it an
    # upper bound on the absolute distance between the edge's endpoints
    e2cost_var = {
        e: m.add_var(var_type=INTEGER, name=f'e_cost_{e.name}')
        for e in edge_list
    }

    def _get_loc_after_partition(v: Vertex):
        # Position of v once its slot is split in half along `direction`:
        # the quarter position of the current slot, shifted by half the slot
        # length when the binary partition variable places v in the other half.
        if direction == Dir.vertical:
            return self.curr_v2s[v].getQuarterPositionX(
            ) + v2var[v] * self.curr_v2s[v].getHalfLenX()
        elif direction == Dir.horizontal:
            return self.curr_v2s[v].getQuarterPositionY(
            ) + v2var[v] * self.curr_v2s[v].getHalfLenY()
        else:
            assert False

    # cost >= ±(loc(src) - loc(dst))  ==>  cost >= |loc(src) - loc(dst)|
    for e, cost_var in e2cost_var.items():
        m += cost_var >= _get_loc_after_partition(
            e.src) - _get_loc_after_partition(e.dst)
        m += cost_var >= _get_loc_after_partition(
            e.dst) - _get_loc_after_partition(e.src)

    # weight each edge's distance by its bit width
    m.objective = minimize(
        xsum(cost_var * e.width for e, cost_var in e2cost_var.items()))
def _compute_integer_image_sizes(image_sizes: List[Size],
                                 layout: Layout) -> List[Size]:
    """Round the layout's image sizes to integers via a MIP: the layout's
    linear constraints are kept exact while the deviation of each image from
    its original aspect ratio is minimised."""
    import mip
    constraints = layout.get_constraints(image_sizes)
    aspect_ratios = [h / w for w, h in image_sizes]

    # set up a mixed-integer program, and solve it
    n_images = len(image_sizes)
    m = mip.Model()
    var_widths = [m.add_var(var_type=mip.INTEGER) for _ in range(n_images)]
    var_heights = [m.add_var(var_type=mip.INTEGER) for _ in range(n_images)]
    # each layout constraint is a signed sum over widths OR heights that must
    # hit an exact result
    for c in constraints:
        if c.is_height:
            vars = ([var_heights[i] for i in c.positive_ids] +
                    [-var_heights[i] for i in c.negative_ids])
        else:
            vars = ([var_widths[i] for i in c.positive_ids] +
                    [-var_widths[i] for i in c.negative_ids])
        m.add_constr(mip.xsum(vars) == c.result)

    # the errors come from a deviation in aspect ratio
    var_errs = [m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)]
    for err, w, h, ar in zip(var_errs, var_widths, var_heights, aspect_ratios):
        m.add_constr(err == h - w * ar)

    # To minimise error, we need to create a convex cost function. Common
    # options are either abs(err) or err ** 2. However, both these functions are
    # non-linear, so cannot be directly computed in MIP. We can represent abs
    # exactly with a type-1 SOS, and approximate ** 2 with a type-2 SOS. Here we
    # use abs.
    var_errs_pos = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    var_errs_neg = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    var_abs_errs = [
        m.add_var(var_type=mip.CONTINUOUS) for _ in range(n_images)
    ]
    for abs_err, err, err_pos, err_neg in zip(var_abs_errs, var_errs,
                                              var_errs_pos, var_errs_neg):
        # err_pos and err_neg are both positive representing each side of the
        # abs function. Only one will be non-zero (SOS Type-1).
        m.add_constr(err == err_pos - err_neg)
        m.add_constr(abs_err == err_pos + err_neg)
        m.add_sos([(err_pos, 1), (err_neg, -1)], sos_type=1)

    m.objective = mip.minimize(mip.xsum(var_abs_errs))
    m.optimize(max_seconds=30)

    # read the integral solution back out of the solver
    new_sizes = [
        Size(int(w.x), int(h.x)) for w, h in zip(var_widths, var_heights)
    ]
    return new_sizes
def add_opt_goal(self, m: Model, fifo_to_paths: Dict[Edge, List[RoutingPath]],
                 path_to_var: Dict[RoutingPath, Var]) -> None:
    """ minimize the total length * width of all selected paths """
    # flatten the per-FIFO candidate lists into a single list of paths
    candidate_paths: List[RoutingPath] = []
    for path_group in fifo_to_paths.values():
        candidate_paths.extend(path_group)
    m.objective = minimize(
        xsum(path_to_var[path] * path.get_cost() for path in candidate_paths))
def _add_opt_goal(m: Model, v_to_s_to_cost: Dict[Vertex, Dict[Slot, int]],
                  v_to_s_to_var: Dict[Vertex, Dict[Slot, Var]]) -> None:
    """ minimize the cost: sum of cost * indicator over all (vertex, slot) pairs """
    m.objective = minimize(
        xsum(v_to_s_to_cost[vertex][slot] * indicator
             for vertex, slot_to_var in v_to_s_to_var.items()
             for slot, indicator in slot_to_var.items()))
def create_mip(solver, J, dur, S, c, r, EST, relax=False, sense=MINIMIZE):
    """Creates a mip model to solve the RCPSP

    Time-indexed formulation: x[j][t] == 1 iff job j starts at time t.
    J: jobs (J[0]/J[-1] are the source/sink dummies), dur: durations,
    S: precedence pairs (u, v), c: resource capacities, r: per-resource
    per-job usage, EST: earliest start times. relax=True gives the LP
    relaxation; sense switches between makespan minimisation/maximisation.
    """
    NR = len(c)  # number of renewable resources
    mip = Model(solver_name=solver)
    sd = sum(dur[j] for j in J)  # horizon: sum of all durations
    # relaxation turns the time-indexed binaries into continuous variables
    vt = CONTINUOUS if relax else BINARY
    x = [
        {
            t: mip.add_var("x(%d,%d)" % (j, t), var_type=vt)
            for t in range(EST[j], sd + 1)
        }
        for j in J
    ]
    TJ = [set(x[j].keys()) for j in J]  # feasible start times per job
    T = set()
    for j in J:
        T = T.union(TJ[j])
    # objective: start time of the sink job == project makespan
    if sense == MINIMIZE:
        mip.objective = minimize(xsum(t * x[J[-1]][t] for t in TJ[-1]))
    else:
        mip.objective = maximize(xsum(t * x[J[-1]][t] for t in TJ[-1]))
    # one time per job
    for j in J:
        mip += xsum(x[j][t] for t in TJ[j]) == 1, "selTime(%d)" % j
    # precedences
    for (u, v) in S:
        mip += (
            xsum(t * x[v][t] for t in TJ[v])
            >= xsum(t * x[u][t] for t in TJ[u]) + dur[u],
            "prec(%d,%d)" % (u, v),
        )
    # resource usage
    for t in T:
        for ir in range(NR):
            # jobs active at time t are those that started in (t-dur, t]
            mip += (
                xsum(
                    r[ir][j] * x[j][tl]
                    for j in J[1:-1]
                    for tl in TJ[j].intersection(
                        set(range(t - dur[j] + 1, t + 1))
                    )
                )
                <= c[ir],
                "resUsage(%d,%d)" % (ir, t),
            )
    return mip
def solve_model(model, varrange, state_values, min_frames=True, output=True, relaxation=False):
    """Two-stage solve: maximise total damage, then (optionally) re-solve to
    minimise real frames while pinning damage at its optimum.

    :param model: pre-built mip model with all constraints attached.
    :param varrange: list of decision variables, one per action.
    :param state_values: dict of per-action coefficient lists ('damage',
        'realframes', 'frames') aligned with varrange.
    :param min_frames: if True, run the second frame-minimising stage.
    :param output: if False, silence the solver; if True, print the solution.
    :param relaxation: solve the LP relaxation instead of the integer model.
    :return: dict with a 'dataTable' of statistics and the non-zero
        'decisionVariables'.
    """
    size = len(varrange)
    max_damage = 0
    min_r_frames = 0
    min_d_frames = 0
    dps = 0
    solution = {
        'dataTable' : [],
        'decisionVariables' : []
    }
    # testpath = os.getcwd() + '/lptemplates/lpfiles/TESTING.lp'
    # Stage 1: maximise total damage.
    model.objective = mip.maximize(mip.xsum(state_values['damage'][i]*varrange[i] for i in range(size)))
    # model.write(testpath) # a test file for debugging
    if not output:
        model.verbose = 0
    model.optimize(relax=relaxation) # right here is where you'd change model properties for speed
    print(model.status)
    if model.status not in [mip.OptimizationStatus.OPTIMAL]:
        solution['dataTable'].append({'id' : 'status', 'value' : 'INFEASIBLE'})
        solution['decisionVariables'].append({'id' : 'N/A', 'value' : 'N/A'})
        return solution
    max_damage = model.objective_value
    if min_frames:
        # Stage 2: pin damage at its optimum, then minimise real frames.
        model += mip.xsum(state_values['damage'][i]*varrange[i] for i in range(size)) == max_damage
        model.objective = mip.minimize(mip.xsum(state_values['realframes'][i]*varrange[i] for i in range(size)))
        model.optimize(relax=relaxation)
    for i in range(size):
        if abs(varrange[i].x) > 1e-6: # only non-zeros
            # Dummy variables are bookkeeping only; keep them out of the report.
            if 'Dummy' in varrange[i].name:
                continue
            solution['decisionVariables'].append({'id' : [varrange[i].name], 'value' : varrange[i].x})
            min_r_frames += varrange[i].x*state_values['realframes'][i]
            min_d_frames += varrange[i].x*state_values['frames'][i]
    # guard against divide-by-zero when nothing takes any real time
    if min_r_frames <= 0:
        dps = 0
    else:
        dps = round((60*max_damage/min_r_frames), 2)
    solution['dataTable'].append({'id' : 'Max Damage', 'value' : max_damage})
    solution['dataTable'].append({'id' : 'DPS', 'value' : dps})
    solution['dataTable'].append({'id' : 'Dragon Time', 'value' : min_d_frames})
    solution['dataTable'].append({'id' : 'Real Time', 'value' : min_r_frames})
    if output:
        print('solution:')
        for v in model.vars:
            if abs(v.x) > 1e-6: # only printing non-zeros
                print('{} : {}'.format(v.name, v.x))
        print(model.status)
        for statistic in solution['dataTable']:
            print('{:11} : {:10}'.format(statistic['id'], statistic['value']))
    return solution
def equivalent_tol(self):
    """Return list of components which in series/parallel are within tolerance
    of the target value, minimising the number of components in use.

    Args:
        components: float values of the resistors/capacitors to choose from. A
            component value can be used as many times as it occurs in this list.
        target: The target value.
        series: True for series, false for parallel.
        tol: Solved resistance will be in range [(1-tol)*target, (1+tol)*target, %
        resistor: True for resistors and inductors, False for capacitors

    Returns:
        Optimal component values, or empty list if no solution.
    """
    tol = self.tolerance/100

    # This is what sets the different parallel and series combination
    # depending on the component type. Reasoning = Boolean algebra.
    # When the combination is additive (series R/L, parallel C) work directly;
    # otherwise work in the reciprocal domain so the sum stays linear.
    condition = (not self.resistor and not self.series) or (self.series and self.resistor)
    _target = self.target if condition else 1/self.target
    _components = self.components if condition else [1/x for x in self.components]
    # reciprocal transform flips the bounds, hence the swapped (1±tol) factors
    lower = (1-tol) * self.target if condition else 1/((1+tol) * self.target)
    upper = (1+tol) * self.target if condition else 1/((1-tol) * self.target)

    m = mip.Model()  # Create new mixed integer/linear model.
    # one binary per available component: 1 = include it in the network
    r_in_use = [m.add_var(var_type=mip.BINARY) for _ in _components]
    opt_r = sum([b * r for b, r in zip(r_in_use, _components)])
    m += opt_r >= lower
    m += opt_r <= upper
    # minimise the number of components used
    m.objective = mip.minimize(mip.xsum(r_in_use))
    m.verbose = False
    sol_status = m.optimize()
    if sol_status != mip.OptimizationStatus.OPTIMAL:
        print('No solution found')
        return []
    # NOTE(review): float(v) on a mip Var may not be supported by every
    # python-mip version; reading v.x is the conventional way -- verify.
    r_in_use_sol = [float(v) for v in r_in_use]
    # map the chosen binaries back onto the ORIGINAL (untransformed) values
    r_to_use = [r for r, i in zip(self.components, r_in_use_sol) if i > 0]
    solved_values = sum(x for x in r_to_use) if condition else 1/sum(1/x for x in r_to_use)
    solved_error = 100 * (solved_values - self.target) / self.target
    print(f'{"Resistors/Inductors" if self.resistor else "Capacitors"}; {r_to_use} in {"series" if self.series else "parallel"} '
          f'will produce {"resistance/inductance" if self.resistor else "capacitance"} = {solved_values:.3f}'\
          f' {"R/I" if self.resistor else "C"}. Aiming for {"R/I" if self.resistor else "C"} = {self.target:.3f}, '
          f'error of {solved_error:.2f}%')
    return r_to_use
def solve_zero_one_linear_program(c, A, b, solver):
    """Minimize c*x subject to A*x <= b with binary x.

    :param c: length-n cost vector.
    :param A: (m, n) constraint matrix.
    :param b: length-m right-hand-side vector.
    :param solver: one of 'cvxpy', 'mip', 'approximate'.
    :return: boolean numpy array of length n with the chosen assignment.
    :raises ValueError: if the solver name is not recognized.
    """
    assert A.shape[1] == c.shape[0]
    # BUG FIX: b has one entry per constraint ROW, so it must match A.shape[0];
    # the original compared it against A.shape[1] (the number of columns).
    assert A.shape[0] == b.shape[0]
    out = None
    if solver == "cvxpy":
        start = time.time()
        print("Solving integer program of shape {}...".format(A.shape))
        # The variable we are solving for
        selection = cvxpy.Variable(c.shape[0], boolean=True)
        weight_constraint = A * selection <= b
        # We tell cvxpy that we want to maximize total utility
        # subject to weight_constraint. All constraints in
        # cvxpy must be passed as a list
        problem = cvxpy.Problem(cvxpy.Minimize(c * selection),
                                [weight_constraint])
        # Solving the problem
        problem.solve(solver=cvxpy.GLPK_MI, verbose=True)
        print("Integer program solved in {}!".format(time.time() - start))
        out = np.array(list(problem.solution.primal_vars.values())[0],
                       dtype=bool)
    elif solver == "mip":
        m = Model()
        x = [m.add_var(var_type=BINARY) for i in range(len(c))]
        m.objective = minimize(xsum(c[i] * x[i] for i in range(len(c))))
        for i in range(A.shape[0]):
            m += xsum(A[i, j] * x[j] for j in range(len(c))) <= b[i]
        m.optimize()
        # treat anything at (or numerically near) 1 as selected
        out = np.array([x[i].x >= 0.99 for i in range(len(c))])
    elif solver == "approximate":
        print("using approximate solution")
        # LP relaxation + rounding; greedily drop overlapping selections
        solution = linprog(c=c, A_ub=A, b_ub=b)
        out = remove_overlapping_in_order(A=A, out=np.round(solution.x) > 0)
    else:
        raise ValueError("Solver {} not recognized".format(solver))
    assert out is not None
    return out
def solveTSP(adjMatrixSubGraph, listPath, nodeKantor, listNode, mapIdxToNode,
             mapNodeToIdx):
    """Solve a TSP over listNode starting from nodeKantor (the office node)
    using the MTZ formulation; returns the chosen arcs as (src, dst) pairs.

    NOTE: mutates the caller's listNode by inserting nodeKantor at index 0.
    mapIdxToNode is accepted but not referenced in this function.
    """
    model = Model()
    listNode.insert(0, nodeKantor)  # tour starts/ends at the office node
    n = len(listNode)
    # add variable
    x = [[model.add_var(var_type=BINARY) for j in range(n)] for i in range(n)]
    y = [model.add_var() for i in range(n)]  # MTZ sequence ids
    # add objective function: total arc cost looked up via the node->index maps
    model.objective = minimize(
        xsum(adjMatrixSubGraph[mapNodeToIdx[listNode[i]]][mapNodeToIdx[
            listNode[j]]] * x[i][j] for i in range(n) for j in range(n)))
    V = set(range(n))
    # constraint : leave each city only once
    for i in V:
        model += xsum(x[i][j] for j in V - {i}) == 1
    # constraint : enter each city only once
    for i in V:
        model += xsum(x[j][i] for j in V - {i}) == 1
    # subtour elimination (Miller-Tucker-Zemlin)
    for (i, j) in product(V - {0}, V - {0}):
        if i != j:
            model += y[i] - (n + 1) * x[i][j] >= y[j] - n
    # optimizing with a 30-second budget
    model.optimize(max_seconds=30)
    res = []
    # checking if a solution was found
    if model.num_solutions:
        print("SOLUTION FOUND")
        for i in range(n):
            for j in range(n):
                if (x[i][j].x == 1):
                    print(
                        listNode[i], " ", listNode[j], " : ",
                        listPath[mapNodeToIdx[listNode[i]]][mapNodeToIdx[
                            listNode[j]]])
                    res.append((listNode[i], listNode[j]))
    else:
        print("gak ketemu")  # Indonesian for "not found"
    return res
def latency_balancing(graph, fifo_to_path) -> Dict[str, int]:
    """Balance latencies of reconvergent stream paths by deepening FIFOs.

    Each vertex gets an integer "arrival time"; each edge must absorb at least
    its routing-path latency. Minimising the width-weighted slack yields the
    extra depth per FIFO. Returns {} when the model is infeasible (typically a
    cycle of streams), otherwise a map FIFO name -> new depth.
    """
    name_to_edge: Dict[str, Edge] = graph.getNameToEdgeMap()
    name_to_vertex: Dict[str, Vertex] = graph.getNameToVertexMap()

    m = get_mip_model_silent()

    # map Vertex -> "arrival time"
    vertex_to_var = {}
    for name, v in name_to_vertex.items():
        vertex_to_var[v] = m.add_var(var_type=INTEGER, name=name)

    # differential constraint for each edge
    for e_name, e in name_to_edge.items():
        # +1 because each FIFO by itself has 1 unit of latency
        # in case the original design is not balanced
        # note that additional pipelining for full_n will not lead to additional latency
        # we only need to increase the grace period of almost full FIFOs by 1
        # [update]: we skip the +1 for the original FIFO. We only take care of our own modifications
        m += vertex_to_var[e.src] >= vertex_to_var[e.dst] + get_latency(
            fifo_to_path[e])

    # minimise the total width-weighted slack across all edges
    m.objective = minimize(
        xsum(e.width * (vertex_to_var[e.src] - vertex_to_var[e.dst])
             for e in name_to_edge.values()))

    status = m.optimize(max_seconds=120)
    if status != OptimizationStatus.OPTIMAL and status != OptimizationStatus.FEASIBLE:
        cli_logger.warning(
            'Failed to balance reconvergent paths at loop level. Most likely there is a loop of streams.'
        )
        return {}

    # get result: slack minus existing pipelining is the depth to add
    fifo_name_to_depth = {}
    for e_name, e in name_to_edge.items():
        e.added_depth_for_rebalance = int(
            vertex_to_var[e.src].x - vertex_to_var[e.dst].x) - e.pipeline_level
        fifo_name_to_depth[e_name] = e.depth + e.added_depth_for_rebalance
        assert e.added_depth_for_rebalance >= 0

    # logging
    for e_name, e in name_to_edge.items():
        _logger.info(
            f'{e_name}: pipeline_level: {e.pipeline_level}, original depth: {e.depth}, added_depth_for_rebalance: {e.added_depth_for_rebalance}, width: {e.width} '
        )

    return fifo_name_to_depth
def create_mip(solver, w, h, W, relax=False):
    """Build a level-based packing MIP: each level is led by its tallest item
    (x[i][i] == 1) and the objective minimises the summed leader heights.

    solver: mip solver backend name; w/h: item widths and heights; W: strip
    width; relax=True builds the LP relaxation with continuous [0, 1] vars.
    Returns the constructed Model.
    """
    m = Model(solver_name=solver)
    n = len(w)
    I = set(range(n))
    # S[i]: items no taller than i (may join i's level);
    # G[i]: items at least as tall as i (may lead a level containing i)
    S = [[j for j in I if h[j] <= h[i]] for i in I]
    G = [[j for j in I if h[j] >= h[i]] for i in I]
    # x[i][j] == 1 iff item j is placed in the level led by item i
    if relax:
        x = [{
            j: m.add_var(
                var_type=CONTINUOUS,
                lb=0.0,
                ub=1.0,
                name="x({},{})".format(i, j),
            )
            for j in S[i]
        } for i in I]
    else:
        x = [{
            j: m.add_var(var_type=BINARY, name="x({},{})".format(i, j))
            for j in S[i]
        } for i in I]
    # total-height variable (declared but not referenced again below; the
    # objective uses the expression `toth` directly)
    if relax:
        vtoth = m.add_var(name="H", lb=0.0, ub=sum(h), var_type=CONTINUOUS)
    else:
        vtoth = m.add_var(name="H", lb=0.0, ub=sum(h), var_type=INTEGER)
    # every opened level contributes the height of its leader
    toth = xsum(h[i] * x[i][i] for i in I)
    m.objective = minimize(toth)
    # each item should appear as larger item of the level
    # or as an item which belongs to the level of another item
    for i in I:
        m += xsum(x[j][i] for j in G[i]) == 1, "cons(1,{})".format(i)
    # represented items should respect remaining width
    for i in I:
        m += (
            (xsum(w[j] * x[i][j] for j in S[i] if j != i) <= (W - w[i]) * x[i][i]),
            "cons(2,{})".format(i),
        )
    return m
def pymip(self, data):
    """Build the balanced clustering assignment MIP: binary membership matrix,
    halved squared-distance objective, and per-cluster size floor.

    :param data: indexable collection of pixel feature vectors; data[i] is
        compared against each cluster centre self.mu[j].
    """
    # Binary weight matrix: w[i][j] == 1 iff pixel i belongs to cluster j
    self.w = [[
        self.model.add_var(var_type=BINARY) for j in range(self.clusters)
    ] for i in range(self.pixels)]
    print(len(self.w), len(self.w[0]))
    # Objective Funtion: half the squared Euclidean distance of each assignment
    self.model.objective = minimize(
        xsum(self.w[i][j] * (self.euclDist(data[i], self.mu[j], None)**2) / 2
             for j in range(self.clusters) for i in range(self.pixels)))
    #Constraints
    # each pixel is assigned to exactly one cluster
    for i in range(self.pixels):
        self.model += xsum(self.w[i][j] for j in range(self.clusters)) == 1
    # each cluster receives at least floor(pixels / clusters) pixels (balance)
    for j in range(self.clusters):
        self.model += xsum(self.w[i][j]
                           for i in range(self.pixels)) >= math.floor(
                               self.pixels / self.clusters)
def solve(self):
    """Try to solve the problem and identify possible matches."""
    self._check_linear_dependency()
    # flatten the 3-D coefficient tensor into an (m, n_1*n_2) equality system
    A_eq = self.A3D.reshape(-1, self.n_1 * self.n_2)
    n = self.n_1 * self.n_2
    # PYTHON MIP:
    model = Model()
    # one binary per flattened (i, j) candidate match
    x = [model.add_var(var_type=BINARY) for i in range(n)]
    # among feasible assignments, prefer the one using the fewest matches
    model.objective = minimize(xsum(x[i] for i in range(n)))
    for i, row in enumerate(A_eq):
        model += xsum(int(row[j]) * x[j] for j in range(n)) == int(self.b[i])
    model.emphasis = 2  # emphasis on proving optimality
    model.verbose = 0
    model.optimize(max_seconds=2)  # short budget; incumbent is good enough
    # reshape the flat solution back into the (n_1, n_2) match matrix
    self.X_binary = np.asarray([x[i].x for i in range(n)
                                ]).reshape(self.n_1, self.n_2)
def _add_opt_goal(
    m: Model,
    v_list: List[Vertex],
    v2var_y1: Dict[Vertex, Var],
    v2var_y2: Dict[Vertex, Var],
) -> None:
    """Add the objective: minimise the width-weighted |Δy| over all edges,
    where each vertex's y position is encoded by two level variables."""
    # add optimization goal
    all_edges = get_all_edges(v_list)
    # one INTEGER cost var per edge, bounded below by |pos_y(src) - pos_y(dst)|
    e2cost_var = {e: m.add_var(var_type=INTEGER, name=f'intra_{e.name}') for e in all_edges}

    # we will use |(y1 * 2 + y2) - (y1' * 2 + y2')| to express the hamming distance
    pos_y = lambda v : v2var_y1[v] * 2 + v2var_y2[v]
    cost_y = lambda e : pos_y(e.src) - pos_y(e.dst)
    # cost >= ±dy  ==>  cost >= |dy| once minimised
    for e, cost_var in e2cost_var.items():
        m += cost_var >= cost_y(e)
        m += cost_var >= -cost_y(e)

    # weight each edge's distance by its bit width
    m.objective = minimize(xsum(cost_var * e.width for e, cost_var in e2cost_var.items() ) )
def mip_mip(x, y0, n, m, N, solver_name):
    """Fit a trimmed L1 linear regression via MIP.

    r[i] is the absolute residual of sample i; binary q[i] marks a KEPT
    sample; t[i] cancels r[i] in the objective only when the sample is
    dropped (q[i] == 0). At least N samples must be kept, so the model
    minimises the residual sum over the best subset of >= N samples.

    :param x: (m, n) feature matrix indexed x[j, i].
    :param y0: length-n target vector.
    :param n: number of samples.
    :param m: number of features.
    :param N: minimum number of samples to keep.
    :param solver_name: Python-MIP solver backend name.
    :return: (residuals r_sol, keep flags q_sol, coefficients b_sol).
    """
    from mip import Model, xsum, minimize

    # set big M: cap on how much a dropped sample's residual can be cancelled
    M = 100
    # generate model
    model = Model(name='linear regression_mip', solver_name=solver_name)
    model.verbose = 0
    # variables
    r = [model.add_var(name='r({})'.format(i), var_type='C') for i in range(n)]
    t = [model.add_var(name='t({})'.format(i), var_type='C') for i in range(n)]
    q = [model.add_var(name='q({})'.format(i), var_type='B') for i in range(n)]
    b = [
        model.add_var(name='b({})'.format(j), lb=-inf, var_type='C')
        for j in range(m)
    ]
    # objective: total residual minus the cancelled part of dropped samples
    model.objective = minimize(xsum(r[i] - t[i] for i in range(n)))
    # constraints
    for i in range(n):
        # r[i] >= |y0[i] - x[:, i] . b|
        model += r[i] >= y0[i] - xsum(x[j, i] * b[j] for j in range(m))
        model += r[i] >= xsum(x[j, i] * b[j] for j in range(m)) - y0[i]
        # t[i] may only rise (up to r[i]) when sample i is dropped (q[i] == 0)
        model += t[i] <= M * (1 - q[i])
        model += t[i] <= r[i]
    # keep at least N samples
    model += xsum(q[i] for i in range(n)) >= N
    # optimize
    model.optimize()
    # get results (removed the dead, never-returned t_sol extraction)
    r_sol = [r[i].x for i in range(n)]
    q_sol = [round(q[i].x) for i in range(n)]
    b_sol = [b[j].x for j in range(m)]
    return r_sol, q_sol, b_sol
def mip_from_std_form(A, b, c):
    """Build an integer program `min c^T x  s.t.  A x <= b` from standard form.

    A is an (m, n) coefficient matrix, b the length-m right-hand side, and c
    the length-n cost vector. Returns the constructed mip.Model.
    """
    num_vars = len(c)
    num_rows = len(b)
    ilp = mip.Model()

    # one integer variable per column of A
    x = [
        ilp.add_var(name='x({})'.format(i), var_type=mip.INTEGER)
        for i in range(num_vars)
    ]

    # each row a of A becomes a . x - b_j <= 0  (zero coefficients skipped)
    for j in range(num_rows):
        row = A[j, :]
        rhs = b[j]
        constraint = mip.xsum(row[i] * x[i] for i in range(num_vars) if row[i] != 0) <= 0
        constraint.add_const(-rhs)
        ilp.add_constr(constraint)

    ilp.objective = mip.minimize(mip.xsum(c[i] * x[i] for i in range(num_vars)))
    return ilp
def TSP_ILP(G):
    """Solve the TSP exactly on distance matrix G via the MTZ ILP formulation.

    :param G: square matrix, G[i][j] = cost of travelling from i to j.
    :return: (total distance, cycle as a list of vertex indices starting and
        ending at 0) when a solution is found; implicitly None otherwise.
    """
    # (removed the dead `start = time()` local -- it was never read)
    V1 = range(len(G))
    n, V = len(G), set(V1)
    model = Model()
    # binary variables indicating if arc (i,j) is used on the route or not
    x = [[model.add_var(var_type=BINARY) for j in V] for i in V]
    # continuous variable to prevent subtours: each city will have a
    # different sequential id in the planned route except the 1st one
    y = [model.add_var() for i in V]
    # objective function: minimize the distance
    model.objective = minimize(xsum(G[i][j] * x[i][j] for i in V for j in V))
    # constraint : leave each city only once
    for i in V:
        model += xsum(x[i][j] for j in V - {i}) == 1
    # constraint : enter each city only once
    for i in V:
        model += xsum(x[j][i] for j in V - {i}) == 1
    # subtour elimination (Miller-Tucker-Zemlin)
    for (i, j) in product(V - {0}, V - {0}):
        if i != j:
            model += y[i] - (n + 1) * x[i][j] >= y[j] - n
    # optimizing
    model.optimize()
    # checking if a solution was found
    if model.num_solutions:
        print('Total distance {}'.format(model.objective_value))
        nc = 0  # cycle starts from vertex 0
        cycle = [nc]
        while True:
            # follow the single outgoing selected arc from the current city
            nc = [i for i in V if x[nc][i].x >= 0.99][0]
            cycle.append(nc)
            if nc == 0:
                break
        return (model.objective_value, cycle)
def init_model(self):
    """Build the assignment-problem model: binary decision matrix, total-cost
    objective, and the one-per-row / one-per-column constraints."""
    # 1. initialise the model with the chosen solver (CBC, minimisation)
    self.ap_model = Model(sense=MINIMIZE, solver_name=CBC)
    # 2. define the decision variables (square binary matrix)
    self.dec_vars = [[
        self.ap_model.add_var(var_type=BINARY)
        for i in range(len(self.profit_matrix))
    ] for j in range(len(self.profit_matrix))]
    # 3. define the objective function: total cost of the chosen assignment
    self.ap_model.objective = minimize(
        xsum(self.dec_vars[i][j] * self.profit_matrix[i][j]
             for i in range(len(self.profit_matrix))
             for j in range(len(self.profit_matrix))))
    # 4. define the constraints
    for i in range(len(self.profit_matrix)):
        # each row must contain exactly one 1
        self.ap_model.add_constr(
            xsum(self.dec_vars[i][j]
                 for j in range(len(self.profit_matrix))) == 1)
    for j in range(len(self.profit_matrix)):
        # each column must contain exactly one 1
        self.ap_model.add_constr(
            xsum(self.dec_vars[i][j]
                 for i in range(len(self.profit_matrix))) == 1)
    self.is_init = True
def add_objective_function(self, objective_function):
    """Add the objective function to the mip model.

    Examples
    --------
    >>> decision_variables = pd.DataFrame({
    ...   'variable_id': [0, 1, 2, 3, 4, 5],
    ...   'lower_bound': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    ...   'upper_bound': [5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
    ...   'type': ['continuous', 'continuous', 'continuous',
    ...            'continuous', 'continuous', 'continuous']})

    >>> objective_function = pd.DataFrame({
    ...   'variable_id': [0, 1, 3, 4, 5],
    ...   'cost': [1.0, 2.0, -1.0, 5.0, 0.0]})

    >>> si = InterfaceToSolver()

    >>> si.add_variables(decision_variables)

    >>> si.add_objective_function(objective_function)

    >>> print(si.mip_model.var_by_name('0').obj)
    1.0

    >>> print(si.mip_model.var_by_name('5').obj)
    0.0
    """
    # index costs by variable_id so each cost pairs with its solver variable
    objective_function = objective_function.sort_values('variable_id')
    objective_function = objective_function.set_index('variable_id')
    obj = minimize(
        xsum(objective_function['cost'][i] * self.variables[i]
             for i in list(objective_function.index)))
    # keep the integer model and its linear relaxation in sync
    self.mip_model.objective = obj
    self.linear_mip_model.objective = obj
def optimal_comb(api: GBD, query, runtimes, timeout, k):
    """Select an optimal solver portfolio of size at most k: a MIP assigns each
    benchmark instance to one chosen solver and minimises total (penalised)
    runtime. Prints the single-best-solver baseline, the solver status, the
    portfolio objective, and the chosen solver names.

    :param api: GBD database handle used to fetch runtimes.
    :param query: GBD query selecting the benchmark instances.
    :param runtimes: list of runtime feature (solver) column names.
    :param timeout: per-instance timeout; failures count as 2 * timeout.
    :param k: maximum number of solvers in the portfolio.
    """
    result = api.query_search(query, [], runtimes)
    # penalise non-numeric entries and timeouts with 2 * timeout
    result = [[
        int(float(val)) if is_number(val) and float(val) < float(timeout) else
        int(2 * timeout) for val in row[1:]
    ] for row in result]
    dataset = pd.DataFrame(result, columns=runtimes)
    # drop instances that no solver finishes within the timeout
    dataset = dataset[(dataset != 2 * timeout).any(axis='columns')]
    model = mip.Model()
    # x_{i}_{j} == 1 iff instance i is served by solver j
    instance_solver_vars = [[
        model.add_var(f'x_{i}_{j}', var_type=mip.BINARY)
        for j in range(dataset.shape[1])
    ] for i in range(dataset.shape[0])]
    # s_{j} == 1 iff solver j is part of the portfolio
    solver_vars = [
        model.add_var(f's_{j}', var_type=mip.BINARY)
        for j in range(dataset.shape[1])
    ]
    for var_list in instance_solver_vars:  # per-instance constraints
        model.add_constr(mip.xsum(var_list) == 1)
    for j in range(dataset.shape[1]):  # per-solver-constraints
        # big-M link: instances may use solver j only if j is selected
        model.add_constr(
            mip.xsum(instance_solver_vars[i][j]
                     for i in range(dataset.shape[0])) <=
            dataset.shape[0] * solver_vars[j])  # "Implies" in Z3
    model.add_constr(mip.xsum(solver_vars) <= k)
    model.objective = mip.minimize(
        mip.xsum(instance_solver_vars[i][j] * int(dataset.iloc[i, j])
                 for i in range(dataset.shape[0])
                 for j in range(dataset.shape[1])))
    print(dataset.sum().min())  # best single-solver total, for comparison
    print(model.optimize())
    print(model.objective_value)
    # report which solvers made it into the portfolio
    for index, item in enumerate([var.x for var in solver_vars]):
        if item > 0:
            print(runtimes[index])
def _build_model(self, structure: Atoms, solver_name: str, verbose: bool) -> mip.Model: """ Build a Python-MIP model based on the provided structure Parameters ---------- structure atomic configuration solver_name 'gurobi', alternatively 'grb', or 'cbc', searches for available solvers if not informed verbose whether to display solver messages on the screen """ # Create cluster maps self._create_cluster_maps(structure) # Initiate MIP model model = mip.Model('CE', solver_name=solver_name) model.solver.set_mip_gap(0) # avoid stopping prematurely model.solver.set_emphasis(2) # focus on finding optimal solution model.preprocess = 2 # maximum preprocessing # Set verbosity model.verbose = int(verbose) # Spin variables (remapped) for all atoms in the structure xs = { i: model.add_var(name='atom_{}'.format(i), var_type=BINARY) for subl in self._active_sublattices for i in subl.indices } ys = [ model.add_var(name='cluster_{}'.format(i), var_type=BINARY) for i in range(len(self._cluster_to_orbit_map)) ] # The objective function is added to 'model' first model.objective = mip.minimize(mip.xsum(self._get_total_energy(ys))) # Connect cluster variables to spin variables with cluster constraints # TODO: don't create cluster constraints for singlets constraint_count = 0 for i, cluster in enumerate(self._cluster_to_sites_map): orbit = self._cluster_to_orbit_map[i] parameter = self._transformed_parameters[orbit + 1] assert parameter != 0 if len(cluster) < 2 or parameter < 0: # no "downwards" pressure for atom in cluster: model.add_constr( ys[i] <= xs[atom], 'Decoration -> cluster {}'.format(constraint_count)) constraint_count = constraint_count + 1 if len(cluster) < 2 or parameter > 0: # no "upwards" pressure model.add_constr( ys[i] >= 1 - len(cluster) + mip.xsum(xs[atom] for atom in cluster), 'Decoration -> cluster {}'.format(constraint_count)) constraint_count = constraint_count + 1 for sym, subl in zip(self._count_symbols, self._active_sublattices): # Create slack variable slack = 
model.add_var(name='slackvar_{}'.format(sym), var_type=INTEGER, lb=0, ub=len(subl.indices)) # Add slack constraint model.add_constr(slack <= -1, name='{} slack'.format(sym)) # Set species constraint model.add_constr(mip.xsum([xs[i] for i in subl.indices]) + slack == -1, name='{} count'.format(sym)) # Update the model so that variables and constraints can be queried if model.solver_name.upper() in ['GRB', 'GUROBI']: model.solver.update() return model
n, V = len(coord), set(range(len(coord))) # distances matrix c = [[0 if i == j else dist(coord[i], coord[j]) for j in V] for i in V] model = Model() # binary variables indicating if arc (i,j) is used on the route or not x = [[model.add_var(var_type=BINARY) for j in V] for i in V] # continuous variable to prevent subtours: each city will have a # different sequential id in the planned route except the first one y = [model.add_var() for i in V] # objective function: minimize the distance model.objective = minimize(xsum(c[i][j] * x[i][j] for i in V for j in V)) # constraint : leave each city only once for i in V: model += xsum(x[i][j] for j in V - {i}) == 1 # constraint : enter each city only once for i in V: model += xsum(x[j][i] for j in V - {i}) == 1 # subtour elimination for (i, j) in product(V - {0}, V - {0}): if i != j: model += y[i] - (n + 1) * x[i][j] >= y[j] - n # optimizing
Arcs = [(i, j) for (i, j) in product(V, V) if i != j] # distance matrix c = [[round(sqrt((p[i][0]-p[j][0])**2 + (p[i][1]-p[j][1])**2)) for j in V] for i in V] model = Model() # binary variables indicating if arc (i,j) is used on the route or not x = [[model.add_var(var_type=BINARY) for j in V] for i in V] # continuous variable to prevent subtours: each city will have a # different sequential id in the planned route except the first one y = [model.add_var() for i in V] # objective function: minimize the distance model.objective = minimize(xsum(c[i][j]*x[i][j] for (i, j) in Arcs)) # constraint : leave each city only once for i in V: model += xsum(x[i][j] for j in V - {i}) == 1 # constraint : enter each city only once for i in V: model += xsum(x[j][i] for j in V - {i}) == 1 # (weak) subtour elimination # subtour elimination for (i, j) in product(V - {0}, V - {0}): if i != j: model += y[i] - (n+1)*x[i][j] >= y[j]-n