def check_redundancy_row(H, h, ROW, atol=10**-8):
    """
    Decide whether row ROW of the polyhedron {x | H x <= h} is redundant.

    Maximizes H[ROW] @ x subject to every *other* row of the system. The
    row is redundant iff that maximum does not exceed h[ROW] (within atol).
    Returns True when redundant, False otherwise (including when the LP
    does not solve to optimality, e.g. it is unbounded).
    """
    model = Model("Row Redundancy Check")
    dim = H.shape[1]
    x = np.empty((dim, 1), dtype='object')
    for i in range(dim):
        x[i, 0] = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY)
    model.update()
    # Impose every constraint except the one under test.
    for r in range(H.shape[0]):
        if r == ROW:
            continue
        lhs = LinExpr()
        for c in range(dim):
            lhs.add(H[r, c] * x[c, 0])
        model.addConstr(lhs <= h[r, 0])
    # Objective: push as far as possible along the tested row's normal.
    obj = LinExpr()
    for c in range(dim):
        obj.add(H[ROW, c] * x[c, 0])
    model.setObjective(obj, GRB.MAXIMIZE)
    model.setParam('OutputFlag', False)
    model.optimize()
    if model.Status == 2:  # optimal
        return not (obj.getValue() > h[ROW, 0] + atol)
    return False
def construct_lp_model(c, A, d):
    """
    Build (without solving) the Gurobi LP:  max c^T x  s.t.  A x <= d.

    A is a k x n numpy array; c and d are numpy vectors of sizes n and k.
    Returns the updated gurobipy Model.
    """
    from gurobipy import Model, LinExpr, GRB
    k, n = A.shape
    m = Model("sp")
    # One free continuous variable per column of A.
    x = [m.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY, name='x_%d' % i)
         for i in range(n)]
    # Objective: maximize c . x
    obj = LinExpr()
    for i in range(n):
        obj.add(x[i], c[i])
    m.setObjective(obj, GRB.MAXIMIZE)
    # One <= constraint per row of A.
    for j in range(k):
        row = LinExpr()
        for i in range(n):
            row.add(x[i], A[j, i])
        m.addConstr(row, GRB.LESS_EQUAL, d[j])
    # Update the model to register the new entries.
    m.update()
    return m
def constraints_list_of_tuples(model, mylist, sign="="):
    """
    Add the entry-wise constraints  sum_i A_i @ B_i  (sign)  0  to `model`.

    `mylist` is a list of (A_i, B_i) matrix pairs; each product must be
    conformable and all products share one output shape. At most one factor
    of each term contains Gurobi variables; purely numeric terms are folded
    into the constant.

    :param model: Gurobi model the constraints are added to
    :param mylist: list of (left, right) 2-D array pairs
    :param sign: one of "<", "=", ">=" selecting <= 0, == 0 or >= 0
    :raises ValueError: on a dimension mismatch or an unknown sign
    """
    if sign not in ("<", "=", ">="):
        # Bug fix: the original executed `raise "sign indefinite"`, which is
        # itself a TypeError in Python 3 (strings cannot be raised). Validate
        # the sign up front with a proper exception instead.
        raise ValueError("sign indefinite")
    term_0 = mylist[0]
    ROWS, COLUMNS = term_0[0].shape[0], term_0[1].shape[1]
    # Obtain the Var type once. The original called model.addVar() inside the
    # nested loops purely for this type comparison, silently adding one junk
    # variable to the model per matrix entry per term.
    var_type = type(model.addVar())
    for row in range(ROWS):
        for column in range(COLUMNS):
            expr = LinExpr()
            for term in mylist:
                q, qp = term[0].shape[1], term[1].shape[0]
                if q != qp:
                    raise ValueError(term, "q=%d qp=%d" % (q, qp))
                if isinstance(term[1][0, column], var_type):
                    # Variables on the right: coefficients from the left.
                    expr.add(LinExpr([(term[0][row, k], term[1][k, column])
                                      for k in range(q)]))
                elif isinstance(term[0][row, 0], var_type):
                    # Variables on the left: coefficients from the right.
                    expr.add(LinExpr([(term[1][k, column], term[0][row, k])
                                      for k in range(q)]))
                else:
                    # Purely numeric term.
                    expr.addConstant(
                        sum([term[1][k, column] * term[0][row, k]
                             for k in range(q)]))
            if sign == "<":
                model.addConstr(expr <= 0)
            elif sign == "=":
                model.addConstr(expr == 0)
            else:  # ">="
                model.addConstr(expr >= 0)
def q2():
    """
    Solve the L1 linear-regression LP: minimize sum_j z_j subject to
    |y_j - (w . x_j + b)| <= z_j for every data point, where (x, y) come
    from functions.open_data(). Returns [objective, b, w].
    """
    from gurobipy import Model, GRB, quicksum, LinExpr
    import numpy
    from functions import open_data

    m = Model('LR')
    x, y = open_data()
    N, n = len(x), len(x[0])

    # Variable layout: var[0:n] = w, var[n] = b, var[n+1:] = z slacks.
    var = [m.addVar(lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS,
                    name='w_{}'.format(i), obj=1) for i in range(n)]
    var.append(m.addVar(lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS,
                        name='b', obj=1))
    for j in range(N):
        var.append(m.addVar(lb=0, vtype=GRB.CONTINUOUS,
                            name='z_{}'.format(j), obj=1))

    # Two inequalities per point encode the absolute-value bound.
    for j in range(N):
        lower = LinExpr()
        upper = LinExpr()
        for i in range(n):
            lower.add(var[i], -x[j][i])
            upper.add(var[i], x[j][i])
        lower += -var[n]
        lower += -var[n + 1 + j]
        upper += var[n]
        upper += -var[n + 1 + j]
        m.addConstr(lower, GRB.LESS_EQUAL, -y[j])
        m.addConstr(upper, GRB.LESS_EQUAL, y[j])

    # Objective: total slack.
    m.setObjective(quicksum(var[n + 1 + j] for j in range(N)), GRB.MINIMIZE)

    m.update()
    m.optimize()
    m.write('q2.lp')

    z = m.objVal
    b = var[n].x
    w = [v.x for v in var[:n]]
    return ([z, b, w])
def constraints_Ab_smaller_c(model, A, b, c):
    """
    Soft row-wise constraints  A b <= c : one nonnegative slack per row
    (objective coefficient 1), with  (A b)_row <= c_row + slack_row.
    Constraints and slack variables are added to `model` in place.
    """
    delta = {}
    for r in range(A.shape[0]):
        delta[r] = model.addVar(lb=0, obj=1)
        model.update()
        row_expr = LinExpr()
        for k in range(A.shape[1]):
            row_expr.add(A[r, k] * b[k, 0])
        model.addConstr(row_expr <= c[r, 0] + delta[r])
def constraints_AB_smaller_c_H_d(model, A, b, c, epsilon, H, d):
    """Add row-wise constraints  A b <= epsilon * c - H d  to `model`."""
    for r in range(A.shape[0]):
        Ab = LinExpr()
        for k in range(A.shape[1]):
            Ab.add(A[r, k] * b[k, 0])
        Hd = LinExpr()
        for k in range(H.shape[1]):
            Hd.add(H[r, k] * d[k, 0])
        model.addConstr(Ab <= c[r, 0] * epsilon - Hd)
def constraints_AB_eq_CD(model, A, B, C, D):
    """Add entry-wise equality constraints  A @ B == C @ D  to `model`."""
    for r in range(A.shape[0]):
        for col in range(B.shape[1]):
            left = LinExpr()
            for k in range(A.shape[1]):
                left.add(A[r, k] * B[k, col])
            right = LinExpr()
            for k in range(C.shape[1]):
                right.add(C[r, k] * D[k, col])
            model.addConstr(right == left)
def populate_benders_cut(duals, master, period, com, data, status): """ Returns the lhs and rhs parts of a benders cut. It does not determine if the cut is an optimality or a feasibility one (their coefficients are the same regardless) :param duals: model dual values (structure Subproblem_Duals) :param master: master gurobi model :param period: period in which we add the cut :param com: commodity in which we add the cut :param data: problem data :param status: subproblem status :return: rhs (double), lhs (Gurobi linear expression) """ # Grab cut coefficients from the subproblem flow_duals = duals.flow_duals ubound_duals = duals.bounds_duals optimality_dual = duals.optimality_dual origin, destination = data.origins[com], data.destinations[com] continuous_variable = master.getVarByName('flow_cost{}'.format( (period, com))) setup_variables = np.take(master._variables, master._bin_vars_idx)[:period + 1, :] const = -flow_duals[origin] + flow_duals[destination] coeff = optimality_dual if status == GRB.status.OPTIMAL else 0. lhs = LinExpr(const) lhs.add(continuous_variable, coeff) # for arc in xrange(len(data.arcs)): # lhs.add(LinExpr([1] * (period + 1), list(setup_variables[:, arc])), # ubound_duals[arc]) ubound_duals_nz_idx = np.nonzero(ubound_duals)[0] if ubound_duals_nz_idx.tolist(): y_vars = setup_variables.take(ubound_duals_nz_idx, axis=1).flatten('F').tolist() coeffs = ubound_duals[ubound_duals_nz_idx].repeat(period + 1) # lhs_trial = LinExpr(const + continuous_variable * optimality_dual) lhs.addTerms(coeffs, y_vars) return lhs
def q2():
    """
    Build and solve the dual LP  min b^T lambda  s.t.  A^T lambda >= c,
    lambda >= 0, with (A, b, c) taken from q1(). Writes 'q2.lp' and
    returns [objective, first four lambda values].
    """
    from gurobipy import GRB, Model, quicksum, LinExpr
    import pandas as pd
    # import numpy

    # Reuse the primal data from question 1.
    from q1 import q1
    A, b, c = q1()

    m = Model('Dual')
    N = len(c)

    # One nonnegative dual variable per primal constraint (row of A).
    lamb = [m.addVar(lb=0, vtype=GRB.CONTINUOUS,
                     name='lamb_{}'.format(i), obj=1)
            for i in range(len(A))]

    # One dual constraint per primal variable: sum_j A[j][i] lamb_j >= c_i.
    for i in range(len(c)):
        lhs = LinExpr()
        for j in range(len(A)):
            lhs.add(lamb[j], A[j][i])
        m.addConstr(lhs, ">=", rhs=c[i])

    # Objective: minimize b . lambda.
    obj = LinExpr()
    for i in range(len(b)):
        obj.add(lamb[i], b[i])
    m.setObjective(obj, GRB.MINIMIZE)

    m.update()
    m.write('q2.lp')
    m.optimize()

    z = m.objVal
    lamb = [v.x for v in lamb[:4]]
    return ([z, lamb])
def terminal_constraint_set_bigM(s, x, G, model, list_of_polytopes):
    """
    Facts: Here we use H-rep. Therefore, G_eps is used! Everything is used
    with bigM.

    Big-M disjunction: the terminal zonotope (x, G) must be contained in at
    least one polytope of list_of_polytopes, selected by the binary
    z_pol[polytope]. Returns the dict of those binaries.
    """
    Lambda = {}
    (n, n_g) = G.shape
    (n_p, n) = s.Pi.shape
    (n_h, n) = (2 * s.n, s.n)
    z_pol = {}
    # NOTE(review): hard-coded big-M of 2 — assumes the problem data is
    # scaled so this bound is valid; confirm against the use sites.
    bigM = 2
    for polytope in list_of_polytopes:
        # Nonnegative Farkas multipliers for this polytope's containment test.
        Lambda[polytope] = np.empty((n_h, n_p), dtype='object')
        for row in range(n_h):
            for column in range(n_p):
                Lambda[polytope][row, column] = model.addVar(lb=0)
        z_pol[polytope] = model.addVar(vtype=GRB.BINARY)
    model.update()
    z_sum = LinExpr()
    for polytope in list_of_polytopes:
        z_sum.add(z_pol[polytope])
        # Lambda * Pi == H * G  (generator containment condition).
        for row in range(n_h):
            for column in range(n_g):
                s_left = LinExpr()
                s_right = LinExpr()
                for k in range(n_p):
                    s_left.add(Lambda[polytope][row, k] * s.Pi[k, column])
                for k in range(n):
                    s_right.add(polytope.polytope.H[row, k] * G[k, column])
                model.addConstr(s_left == s_right)
        # Lambda * 1 <= H*
        # (center containment; relaxed by big-M unless z_pol[polytope] == 1).
        for row in range(n_h):
            s_left = LinExpr()
            s_right = LinExpr()
            for k in range(n_p):
                s_left.add(Lambda[polytope][row, k])
            for k in range(n):
                s_right.add(polytope.polytope.H[row, k] * x[k, 0])
            model.addConstr(s_left <= polytope.polytope.h[row, 0] - s_right
                            + bigM - bigM * z_pol[polytope])
    # Exactly one terminal polytope is active.
    model.addConstr(z_sum == 1)
    return z_pol
def terminal_constraint_old(s, x, G, T, model, state):
    """
    Legacy terminal constraint: force the terminal zonotope (x[T], G[T])
    into the target `state`. Uses an H-rep subset test when the target has
    full volume, otherwise a vertex/convex-combination (Lambda) encoding.
    """
    # Terminal Constraint
    if state.character != -1 and state.volume_flag == True:
        print("taking G approach")
        # Map the primitive polytope through the target's inverse generator.
        H = np.dot(s.Pi, state.Ginv)
        h = np.ones((s.Pi.shape[0], 1)) + np.dot(H, state.x)
        # print("Nonzero Volume!","Ginv=",state2.Ginv,"H=",H,"h=",h)
        subset(model, G[T], s.Pi, H, h, x[T])
    else:
        print("taking Lambda approach")
        # The vertices by G[i,T]
        # Lambda[alpha, beta]: weight of target vertex beta in the convex
        # combination representing terminal vertex alpha.
        Lambda = {}
        for alpha in range(2**s.n):
            for beta in range(2**state.G.shape[1]):
                Lambda[alpha, beta] = model.addVar(lb=0)
        model.update()
        for alpha in range(2**s.n):
            for row in range(s.n):
                exp_left = LinExpr()
                exp_right = LinExpr()
                for k in range(s.n):
                    exp_left.add(G[T][row, k] *
                                 s.vertices[alpha, :].reshape(s.n, 1)[k, 0])
                for beta in range(2**state.G.shape[1]):
                    exp_right.add(
                        Lambda[alpha, beta] *
                        state.vertices[beta, :].reshape(s.n, 1)[row, 0])
                # Terminal vertex alpha equals a weighted sum of target
                # vertices, both expressed relative to their centers.
                model.addConstr(x[T][row, 0] + exp_left ==
                                exp_right + state.x[row, 0])
            # Weights for vertex alpha form a sub-convex combination.
            lambda_sum = LinExpr()
            for beta in range(2**state.G.shape[1]):
                lambda_sum.add(Lambda[alpha, beta])
            model.addConstr(lambda_sum <= 1)
def re_verification_tree_extension(s, x, G, i, state_end, factor=0.99):
    """
    This is a method to deal with numerical inaccuracies in high dimensional
    MILPs that are terminated early. Requires solving a linear program. If
    feasible, the computed control strategy is recomputed. If infeasible,
    report back, raise a flag, and abort the tree extension.

    Inputs:
        s: system (A, B, c dynamics, Pi/F/f polytopes), x: current state,
        G: current generator matrix, i: mode index, state_end: target state,
        factor: contraction factor applied to the generator dynamics.
    Output:
        (u, theta, eps) valuations on success, None on failure.
    """
    model = Model("verifying tree extention")
    G_dynamic = np.empty((s.n, s.n), dtype='object')
    theta = np.empty((s.m, s.n), dtype='object')
    u = np.empty((s.m, 1), dtype='object')
    x_bar = np.empty((s.n, 1), dtype='object')
    for row in range(s.n):
        for column in range(s.n):
            G_dynamic[row, column] = model.addVar(lb=-GRB.INFINITY,
                                                  ub=GRB.INFINITY)
    for row in range(s.m):
        u[row, 0] = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY)
        for column in range(s.n):
            theta[row, column] = model.addVar(lb=-GRB.INFINITY,
                                              ub=GRB.INFINITY)
    for row in range(s.n):
        x_bar[row, 0] = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY)
    model.update()
    # G_dynamic == factor * (A G + B theta): contracted generator dynamics.
    for row in range(s.n):
        for column in range(s.n):
            AG = LinExpr()
            for k in range(s.n):
                AG.add(s.A[i][row, k] * G[k, column])
            for k in range(s.m):
                AG.add(s.B[i][row, k] * theta[k, column])
            model.addConstr(G_dynamic[row, column] == AG * factor)
    # x_bar == A x + B u + c: successor center.
    for row in range(s.n):
        Bu = LinExpr()
        for k in range(s.m):
            Bu.add(s.B[i][row, k] * u[k, 0])
        model.addConstr(x_bar[row, 0] == np.dot(s.A[i], x)[row, 0] + Bu +
                        s.c[i][row, 0])
    # Soft containment of the successor zonotope in the target, slack eps.
    eps = subset_eps(model, G_dynamic, s.Pi, state_end.polytope.H,
                     state_end.polytope.h, x_bar)
    # Hard containment of the control in the admissible input set.
    subset(model, theta, s.Pi, s.F[i], s.f[i], u)
    model.setParam("OutputFlag", False)
    model.optimize()
    if model.Status == 2:
        print("eps=", valuation(eps).T)
        if np.amax(valuation(eps)) > 10**-3:
            print("\n-" * 10,
                  "Too large of Epsilon: %d Tree extention failed!" %
                  np.amax(valuation(eps)), "\n-" * 10)
            return None
        else:
            print("Tree extention verified succesuflly!")
            return (valuation(u), valuation(theta), valuation(eps))
    else:
        print("\n-" * 10, "Infeasible! Tree extention failed!", "\n-" * 10)
        return None
def anchor_point(polytope): """ A point in H,h """ model=Model("Polytope Sampling") n=polytope.H.shape[1] x=np.empty((n,1),dtype="object") rho=np.empty((polytope.H.shape[0],1),dtype="object") for row in range(n): x[row,0]=model.addVar(lb=-GRB.INFINITY,ub=GRB.INFINITY) for row in range(polytope.H.shape[0]): rho[row,0]=model.addVar(lb=0,ub=GRB.INFINITY) model.update() J=QuadExpr(0) for row in range(polytope.H.shape[0]): a=LinExpr() for column in range(polytope.H.shape[1]): a.add(polytope.H[row,column]*x[column,0]) model.addConstr(a+rho[row,0]==polytope.h[row]) J.add(rho[row,0]*rho[row,0]) model.setParam('OutputFlag',False) model.setObjective(J) model.optimize() return valuation(x)
def _build_constraints(self): """ Build constraints. """ # Flow conservation for node in self._nw_container.network.nodes(): # out flow out_flow = LinExpr() for (u, v) in self._nw_container.network.out_edges(node): out_flow.add(self._edge_flow_vars[self._nw_container.network[u] [v][0]["data"].arc_id]) # in flow in_flow = LinExpr() for (u, v) in self._nw_container.network.in_edges(node): in_flow.add(self._edge_flow_vars[self._nw_container.network[u] [v][0]["data"].arc_id]) # rhs rhs = LinExpr() node_id = self._nw_container.network.nodes[node]["data"].node_id if isinstance(self._nw_container.network.nodes[node]["data"], Entry): rhs.add(self._node_flow_vars[node_id], 1.0) elif isinstance(self._nw_container.network.nodes[node]["data"], Exit): rhs.add(self._node_flow_vars[node_id], -1.0) else: rhs = 0 self._model.addConstr(out_flow - in_flow == rhs, f"flow_conservation_{node_id}") # Pressure increase constraint for compressor stations for (u, v) in self._nw_container.network.edges(): edge_data = self._nw_container.network[u][v][0]["data"] if isinstance(edge_data, CompressorStation): edge_id = edge_data.arc_id self._model.addConstr( self._node_press_vars[v] == self._node_press_vars[u] + self._compr_vars[edge_id], f"compressor_increase_{edge_id}", )
def subset_eps(model, G, Pi, H, h, x): """ Description: Add Farkas lemma constraints for subset inclusion of x+GP in {e|H.<h} Inputs: model: Gurobi optimization model G: n * n_g generator matrix Pi: n_pi*n matrix where {x|Pi x< 1} is the primitive polytope {x| Hx<h} is the set constraint x is the point Output: no direct output. Adds constraints to the model. FUTURE: we may add lambda positive matrix as an output for debugging """ (n, n_g) = G.shape (n_p, n) = Pi.shape (n_h, n) = H.shape Lambda = np.empty((n_h, n_p), dtype='object') eps = np.empty((n_h, 1), dtype='object') for row in range(n_h): for column in range(n_p): Lambda[row, column] = model.addVar(lb=0) eps[row, 0] = model.addVar(lb=0, obj=1) model.update() # Lambda * Pi = H * G for row in range(n_h): for column in range(n_g): s_left = LinExpr() s_right = LinExpr() for k in range(n_p): s_left.add(Lambda[row, k] * Pi[k, column]) for k in range(n): s_right.add(H[row, k] * G[k, column]) model.addConstr(s_left == s_right) # Lambda * 1 <= H* for row in range(n_h): s_left = LinExpr() s_right = LinExpr() for k in range(n_p): s_left.add(Lambda[row, k]) for k in range(n): s_right.add(H[row, k] * x[k, 0]) model.addConstr(s_left <= h[row, 0] - s_right + eps[row, 0]) return eps
def state_trajectory(s, x0, state_end, T):
    """
    MILP for a point trajectory of the PWA system `s` from x0 to state_end
    in T steps. Generators G and feedback theta are fixed to zero here, so
    only the state/input trajectory and the mode sequence are decided.

    Returns (x, u, z, False) with the symbolic containers on failure, or
    (x_n, u_n, z_n, True) with numeric trajectories and the mode sequence
    on success (status optimal or interrupted-with-solution).
    """
    model = Model("trajectory of polytopes")
    x = {}
    u = {}
    theta = {}
    z = {}
    G_bound = 100
    # Mode 1:
    for t in range(T):
        x[t] = np.empty((s.n, 1), dtype='object')  # n*1
        u[t] = np.empty((s.m, 1), dtype='object')  # m*1
        theta[t] = np.empty((s.m, s.n), dtype='object')  # n*m
        for row in range(s.n):
            x[t][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
        for row in range(s.m):
            u[t][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
        for row in range(s.m):
            for column in range(s.n):
                theta[t][row, column] = 0  # no feedback: theta pinned to 0
    for t in range(T + 1):
        for i in s.modes:
            z[t, i] = model.addVar(vtype=GRB.BINARY)  # mode-i indicator at t
    x[T] = np.empty((s.n, 1), dtype='object')  # Final state in Mode i
    for row in range(s.n):
        x[T][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
    G = {}
    for t in range(T + 1):
        G[t] = np.empty((s.n, s.n), dtype='object')
        for row in range(s.n):
            for column in range(s.n):
                G[t][row, column] = 0  # zero generators: point trajectory
    model.update()
    # Trajectory Constraints:
    # Mode i:
    bigM = 100
    for i in s.modes:
        for t in range(T):
            for row in range(s.n):
                Ax = LinExpr()
                for k in range(s.n):
                    Ax.add(s.A[i][row, k] * x[t][k, 0])
                for k in range(s.m):
                    Ax.add(s.B[i][row, k] * u[t][k, 0])
                # Big-M pair: x[t+1] == A x + B u + c active iff z[t,i] == 1.
                model.addConstr(x[t + 1][row, 0] <= Ax + s.c[i][row] + bigM -
                                bigM * z[t, i])
                model.addConstr(x[t + 1][row, 0] >= Ax + s.c[i][row] - bigM +
                                bigM * z[t, i])
    # Generator Dynamics Constraints:
    for i in s.modes:
        for t in range(T):
            for row in range(s.n):
                for column in range(s.n):
                    AG = LinExpr()
                    for k in range(s.n):
                        AG.add(s.A[i][row, k] * G[t][k, column])
                    for k in range(s.m):
                        AG.add(s.B[i][row, k] * theta[t][k, column])
                    model.addConstr(
                        G[t + 1][row, column] <= AG + bigM - bigM * z[t, i])
                    model.addConstr(
                        G[t + 1][row, column] >= AG - bigM + bigM * z[t, i])
    # Constraints of modes: exactly one active mode per step.
    for t in range(T + 1):
        sum_z = LinExpr()
        for i in s.modes:
            sum_z.add(z[t, i])
        model.addConstr(sum_z == 1)
    # Constraints of mode subsets
    for t in range(T):
        for i in s.modes:
            subset_MILP(model, G[t], s.Pi, s.H[i], s.h[i], x[t], z[t, i])
            subset_MILP(model, theta[t], s.Pi, s.F[i], s.f[i], u[t], z[t, i])
    # set objective
    model.update()
    # Terminal Constraint
    terminal_constraint(s, x, G, T, model, state_end)
    # Starting Point: pin the initial mode and initial state.
    i_start = find_mode(s, x0)
    for i in s.modes:
        model.addConstr(z[0, i] == int(i == i_start))
    model.setParam('OutputFlag', True)
    for row in range(s.n):
        model.addConstr(x[0][row, 0] == x0[row, 0])
    model.optimize()
    # Status 2 = optimal, 11 = interrupted (may still carry a solution).
    if model.Status != 2 and model.Status != 11:
        flag = False
        # print("*"*20,"Flag is False and Status is",model.Status)
        return (x, u, z, flag)
    else:
        flag = True
        # print("*"*20,"Flag is True and Status is",model.Status)
        x_n = valuation(x)
        u_n = valuation(u)
        z_n = mode_sequence(s, z)
        return (x_n, u_n, z_n, flag)
def optimize_single_project(AT, j, project_list, project_activity, M):
    """
    Build and solve the single-project scheduling MILP for project j:
    minimize the completion time CT subject to resource arrival times AT,
    precedence edges, and big-M disjunctions between activities that share
    rk_resources.

    :param AT: resource arrival times keyed by (j, resource)
    :param j: project index
    :param project_list: list of project names
    :param project_activity: dict project -> activity graph
    :param M: big-M constant for the disjunctive ordering constraints
    :return: (objective value, skj sensitivity information)
    """
    global _time_limit_per_model
    m = Model("SingleProject_%d" % j)
    m.params.timelimit = _time_limit_per_model
    # m.setParam('OutputFlag', False)
    # m.params.IntFeasTol = 1e-7
    #### Create variables ####
    project = project_list[j]
    ## Project complete date, project tardiness, construction completion time
    CT = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="CT_%d" % j)
    ## Activity start time
    ST = {}
    project_activities = project_activity[project]
    for row in project_activities.nodes():
        ST[row] = m.addVar(obj=0, vtype=GRB.CONTINUOUS,
                           name="ST_%d_%s" % (j, row))
    ## Review sequence z_ij
    ## move to annealing objective function
    # y[i1, i2]: ordering binary for activity pairs competing for a shared
    # rk_resource.
    y = {}
    for activity_i in project_activities.nodes():
        for activity_j in project_activities.nodes():
            if activity_i != activity_j and len(list(
                    set(project_activities.node[activity_i]['rk_resources']).intersection(
                        project_activities.node[activity_j]['rk_resources']))) > 0:
                y[activity_i, activity_j] = m.addVar(
                    obj=0, vtype=GRB.BINARY,
                    name="y_%d_%s_%s" % (j, activity_i, activity_j))
    m.update()
    ## Constrain 8: activity starting constrain
    # equation 20: an activity cannot start before its resources arrive.
    for a in project_activities.nodes():
        for r in project_activities.node[a]['resources']:
            m.addConstr(AT[j, r], GRB.LESS_EQUAL, ST[a],
                        name="constraint_8_project_%d_activity_%s_resource_%s" % (j, a, r))
    ## Constrain 9 activity sequence constrain
    # equation 21: precedence along activity-graph edges.
    for row1, row2 in project_activities.edges():
        m.addConstr(ST[row1] + project_activities.node[row1]['duration'],
                    GRB.LESS_EQUAL, ST[row2],
                    name="constraint_9_project_%d_activity_%s_activity_%s" % (j, row1, row2))
    ## Constrain 10,11: big-M disjunction — one of the two orderings holds.
    for row1 in project_activities.nodes():
        for row2 in project_activities.nodes():
            if row1 != row2 and len(list(
                    set(project_activities.node[row1]['rk_resources']).intersection(
                        project_activities.node[row2]['rk_resources']))) > 0:
                # equation 22
                m.addConstr(ST[row1] + project_activities.node[row1]['duration'] - M * (
                        1 - y[row1, row2]), GRB.LESS_EQUAL, ST[row2],
                            name="constraint_10_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                # equation 23
                m.addConstr(
                    ST[row2] + project_activities.node[row2]['duration'] - M * (y[row1, row2]),
                    GRB.LESS_EQUAL, ST[row1],
                    name="constraint_11_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                # m.addConstr(y[j,row1,row2]+y[j,row2,row1],GRB.LESS_EQUAL,1)
    ## Constrain 12
    # equation 24: CT dominates every activity's finish time.
    for row in project_activities.nodes():
        m.addConstr(CT, GRB.GREATER_EQUAL,
                    ST[row] + project_activities.node[row]['duration'],
                    name="constraint_12_project_%d_activity_%s" % (j, row))
    m.update()
    # Set optimization objective - minimize completion time
    expr = LinExpr()
    expr.add(CT)
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    # m.params.presolve = 1
    m.update()
    # Solve
    # m.params.presolve=0
    m.optimize()
    m.write(join(_result_output_path,
                 "round_%d_optimize_single_project_%d.lp" % (_round, j)))
    m.write(join(_result_output_path,
                 "round_%d_optimize_single_project_%d.sol" % (_round, j)))
    # _logger.info("project %d with optimalVal %r" % (j, m.objVal))
    # m.fixedModel()
    skj = _sensitivity_for_constraints(AT, j, project, y, project_activity, M)
    # for c in m.getConstrs():
    #     if c.ConstrName.startswith('constraint_8_project'):
    #         splits = c.ConstrName.split('_')
    #         if c.Pi == 0:
    #             _logger.info('project %d bind resource:%s Slack:%.4g'%(j, splits[-1],c.Pi))
    #             break
    # else:
    #     _logger.info('project %d not bind'%j)
    # Append (round, project, objective) to the module-level results frame.
    _single_project_objective_dataset.loc[
        _single_project_objective_dataset.shape[0]] = [_round, j, m.objVal]
    return m.objVal, skj
def trajectory_model(s, T):
    """
    Build (but do not solve) the polytopic-trajectory MILP of the PWA
    system `s` over horizon T, and cache the assembled pieces in
    s.core_constraints[T], s.core_Vars[T] and s.library[T] for later reuse.
    """
    model = Model("polytopic trajectory of PWA systems")
    x = {}
    u = {}
    theta = {}
    z = {}
    G = {}
    G_bound = 10
    for t in range(T):
        x[t] = np.empty((s.n, 1), dtype='object')  # n*1
        u[t] = np.empty((s.m, 1), dtype='object')  # m*1
        theta[t] = np.empty((s.m, s.n), dtype='object')  # n*m
        for row in range(s.n):
            x[t][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
        for row in range(s.m):
            u[t][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
        for row in range(s.m):
            for column in range(s.n):
                theta[t][row, column] = model.addVar(lb=-G_bound, ub=G_bound)
    for t in range(T + 1):
        for i in s.modes:
            z[t, i] = model.addVar(vtype=GRB.BINARY)  # mode-i indicator at t
    x[T] = np.empty((s.n, 1), dtype='object')  # Final state in Mode i
    for row in range(s.n):
        x[T][row, 0] = model.addVar(lb=-G_bound, ub=G_bound)
    for t in range(T + 1):
        G[t] = np.empty((s.n, s.n), dtype='object')
        for row in range(s.n):
            for column in range(s.n):
                G[t][row, column] = model.addVar(lb=-G_bound, ub=G_bound)
    # Trajectory Constraints:
    bigM = G_bound * 2
    for t in range(T):
        for i in s.modes:
            for row in range(s.n):
                Ax = LinExpr()
                for k in range(s.n):
                    Ax.add(s.A[i][row, k] * x[t][k, 0])
                for k in range(s.m):
                    Ax.add(s.B[i][row, k] * u[t][k, 0])
                # Big-M pair: dynamics equality active iff z[t, i] == 1.
                model.addConstr(x[t + 1][row, 0] <= Ax + s.c[i][row] + bigM -
                                bigM * z[t, i])
                model.addConstr(x[t + 1][row, 0] >= Ax + s.c[i][row] - bigM +
                                bigM * z[t, i])
    # Generator Dynamics Constraints:
    for t in range(T):
        for i in s.modes:
            for row in range(s.n):
                for column in range(s.n):
                    AG = LinExpr()
                    for k in range(s.n):
                        AG.add(s.A[i][row, k] * G[t][k, column])
                    for k in range(s.m):
                        AG.add(s.B[i][row, k] * theta[t][k, column])
                    model.addConstr(
                        G[t + 1][row, column] <= AG + bigM - bigM * z[t, i])
                    model.addConstr(
                        G[t + 1][row, column] >= AG - bigM + bigM * z[t, i])
    # Constraints of mode subsets
    for t in range(T):
        for i in s.modes:
            subset_MILP(model, G[t], s.Pi, s.H[i], s.h[i], x[t], z[t, i])
            subset_MILP(model, theta[t], s.Pi, s.F[i], s.f[i], u[t], z[t, i])
    # Constraints of modes: exactly one active mode per step.
    for t in range(T + 1):
        sum_z = LinExpr()
        for i in s.modes:
            sum_z.add(z[t, i])
        model.addConstr(sum_z == 1, name="sadra%d" % T)
    model.update()
    # Cache the assembled model pieces, keyed by horizon T, for reuse.
    s.core_constraints[T] = model.getConstrs()
    s.core_Vars[T] = model.getVars()
    s.library[T] = (model, x, u, G, theta, z)
def __objective_function(self, x, q):
    """
    Build and solve the master scheduling model: obtain each project's
    completion time CT[j] from the per-project subproblem, sequence the
    project reviews with binaries z, link delivery times DT[j] and
    tardiness TD[j], and minimize the weighted tardiness sum_j w[j]*TD[j].

    :param x: supplier assignment passed to the per-project subproblems
    :param q: shipping quantities (part of the call signature; not used
        directly here)
    :return: (objective value, index of the project with the largest
        weighted tardiness)
    """
    m = Model("Overall_Model")
    CT = {}
    DT = {}
    TD = {}
    #### Add Variable ####
    for j in range(self.project_n):
        ## solve individual model get Project complete date
        CT[j] = self.__optmize_single_project(x, j)
        ## Project tardiness, construction completion time
        DT[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT%d)" % j)
        TD[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(TD%d)" % j)
    # Dummy "project -1" marks the start of the review sequence.
    DT[-1] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT-1)")
    ## Review Sequence z_ij
    z = {}
    for i in range(self.project_n):
        for j in range(self.project_n):
            if i != j:
                z[i, j] = m.addVar(obj=0, vtype=GRB.BINARY,
                                   name="(z%d,%d)" % (i, j))
    for j in range(self.project_n):
        z[-1, j] = m.addVar(obj=0, vtype=GRB.BINARY,
                            name="(z%d,%d)" % (-1, j))
    m.update();
    #### Add Constraint ####
    ## Constrain 2: project complete date > due date ##
    for j in range(self.project_n):
        m.addConstr(DT[j] - TD[j], GRB.LESS_EQUAL, self.DD[j],
                    name="constraint_2_project_%d" % j)
    ## Constraint 13: delivery no earlier than completion plus review.
    for j in range(self.project_n):
        m.addConstr(DT[j], GRB.GREATER_EQUAL,
                    CT[j] + self.review_duration[j],
                    name="constraint_13_project_%d" % j)
    ## Constraint 14: big-M sequencing of reviews along the z order.
    for i in range(-1, self.project_n):
        for j in range(self.project_n):
            if i != j:
                m.addConstr(DT[j], GRB.GREATER_EQUAL,
                            DT[i] - self.M * (1 - z[i, j]) +
                            self.review_duration[j],
                            name="constraint_14_project_%d_project_%d" % (i, j))
    ## Constrain 15: each project has exactly one predecessor.
    for j in range(self.project_n):
        m.addConstr(quicksum(z[i, j] for i in range(-1, self.project_n) if i != j),
                    GRB.EQUAL, 1, name="constraint_15_project_%d" % j)
    ## Constrain 16: exactly one project is reviewed first.
    m.addConstr(quicksum(z[-1, j] for j in range(self.project_n)),
                GRB.EQUAL, 1, name="constraint_16")
    ## Constrain 17: each project precedes at most one other.
    for i in range(self.project_n):
        m.addConstr(quicksum(z[i, j] for j in range(self.project_n) if j != i),
                    GRB.LESS_EQUAL, 1, name="constraint_17_project_%d" % i)
    m.update()
    # Set optimization objective - minimize weighted sum of tardiness.
    expr = LinExpr()
    for j in range(self.project_n):
        expr.add(self.w[j] * TD[j])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    m.params.presolve = 1
    m.update()
    m.optimize()
    m.write(join(self.output_dir, "heuristic_whole.lp"))
    m.write(join(self.output_dir, "heuristic_whole.sol"))
    print([self.w[j] * TD[j].X for j in range(self.project_n)])
    return m.objVal, argmax([self.w[j] * TD[j].X
                             for j in range(self.project_n)])
def __init__(self, n_vertices, edges, constraints, k, minsize,
             verbosity=0, symmetry_breaking=True, overlap=False,
             single_cut=False, timeout=None):
    """
    Build the MILP for partitioning a graph into k clusters of size at
    least `minsize`, minimizing the total weight of violated cannot-link
    constraints. The model is stored on self.model with the bookkeeping
    attributes used by the lazy cutting-plane callback.

    :param n_vertices: number of graph vertices
    :param edges: edge list (validated and handed to Cut_Finder)
    :param constraints: iterable of (u, v, w) cannot-link triples, weight w
    :param k: number of clusters
    :param minsize: minimum cluster size
    :param verbosity: Gurobi OutputFlag value
    :param symmetry_breaking: add cluster-ordering constraints when True
    :param overlap: allow vertices in >= 1 cluster instead of exactly 1
    :param single_cut: stored on the model for the callback
    :param timeout: solver time limit (stored; applied elsewhere)
    """
    self.check_graph(n_vertices, edges)
    self.n_vertices = n_vertices
    self.k = k
    self.verbosity = verbosity
    self.timeout = timeout
    model = Model('graph_clustering')
    # mvars[i][j] == 1 iff vertex j belongs to cluster i.
    mvars = []
    for i in range(k):
        cvars = []
        for j in range(n_vertices):
            v = model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
            cvars.append(v)
        mvars.append(cvars)
    model.update()
    ineq_sense = GRB.GREATER_EQUAL if overlap else GRB.EQUAL
    # constraint: each vertex in exactly/at least one cluster
    for v in range(n_vertices):
        model.addConstr(quicksum([mvars[i][v] for i in range(k)]),
                        ineq_sense, 1)
    # symmetry-breaking constraints: pin vertex 0 to cluster 0 and order
    # clusters by non-decreasing size.
    if symmetry_breaking:
        model.addConstr(mvars[0][0], GRB.EQUAL, 1)
        for i in range(2, k):
            model.addConstr(
                quicksum([mvars[i - 1][j] for j in range(n_vertices)]) <=
                quicksum([mvars[i][j] for j in range(n_vertices)]))
    # size constraint
    for i in range(k):
        model.addConstr(
            quicksum([mvars[i][v] for v in range(n_vertices)]) >= minsize)
    obj_expr = LinExpr()
    # indicators for violation of cl constraints:
    # y == 1 when u and v share cluster i, contributing weight w.
    for (u, v, w) in constraints:
        for i in range(k):
            y = model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
            model.update()
            model.addConstr(y >= mvars[i][u] + mvars[i][v] - 1)
            obj_expr.add(y, w)
    model.setObjective(obj_expr, GRB.MINIMIZE)
    model.params.OutputFlag = self.verbosity
    # Lazy-constraint machinery required by the cutting-plane callback.
    model.Params.PreCrush = 1
    model.Params.LazyConstraints = 1
    model._cutfinder = Cut_Finder(n_vertices, edges)
    model._vars = mvars
    model._k = k
    model._relobj = None
    model._impcounter = 0
    model._single_cut = single_cut
    # runtime information
    model._root_cuttime = 0
    model._tree_cuttime = 0
    self.model = model
def constraints_AB_smaller_c(model, A, b, c, epsilon):
    """Add row-wise constraints  A b <= epsilon * c  to `model`."""
    rows, cols = A.shape
    for r in range(rows):
        Ab = LinExpr()
        for k in range(cols):
            Ab.add(A[r, k] * b[k, 0])
        model.addConstr(Ab <= c[r, 0] * epsilon)
def terminal_constraint_convex_hull(s, x, G, model, list_of_states):
    """
    Convex-hull (sharp) disjunctive formulation of the terminal constraint:
    the terminal zonotope (x, G) is decomposed into per-state pieces
    (x_pol[y], G_pol[y]) gated by binaries z_pol[y], each contained in y's
    H-rep polytope via Farkas-style multipliers. Returns z_pol.
    (Original note: "Here we use H-rep. Therefore, G_eps is used!")
    """
    print("Using Convex Hull Disjunctive Formulation with %d Number of Polytopes" % len(list_of_states))
    Lambda = {}
    z_pol = {}
    x_pol = {}
    G_pol = {}
    for y in list_of_states:
        # Farkas multipliers and the piece of (x, G) assigned to state y.
        Lambda[y] = np.empty((y.polytope.H.shape[0], s.Pi.shape[0]),
                             dtype='object')
        x_pol[y] = np.empty((s.n, 1), dtype='object')
        G_pol[y] = np.empty((s.n, s.n), dtype='object')
        for row in range(y.polytope.H.shape[0]):
            for column in range(s.Pi.shape[0]):
                Lambda[y][row, column] = model.addVar(lb=0)
        z_pol[y] = model.addVar(vtype=GRB.BINARY)
        for row in range(s.n):
            x_pol[y][row, 0] = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY)
        for row in range(s.n):
            for column in range(s.n):
                G_pol[y][row, column] = model.addVar(lb=-GRB.INFINITY,
                                                     ub=GRB.INFINITY)
    model.update()
    z_sum = LinExpr()
    G_sum = np.empty((s.n, s.n), dtype='object')
    x_sum = np.empty((s.n, 1), dtype='object')
    for row in range(s.n):
        x_sum[row, 0] = LinExpr()
        for column in range(s.n):
            G_sum[row, column] = LinExpr()
    for y in list_of_states:
        z_sum.add(z_pol[y])
        for row in range(s.n):
            x_sum[row, 0].add(x_pol[y][row, 0])
            for column in range(s.n):
                G_sum[row, column].add(G_pol[y][row, column])
        # Lambda * Pi == H * G_pol  (generator containment for piece y).
        for row in range(y.polytope.H.shape[0]):
            for column in range(s.n):
                s_left = LinExpr()
                s_right = LinExpr()
                for k in range(s.Pi.shape[0]):
                    s_left.add(Lambda[y][row, k] * s.Pi[k, column])
                for k in range(s.n):
                    s_right.add(y.polytope.H[row, k] * G_pol[y][k, column])
                model.addConstr(s_left == s_right)
        # Lambda * 1 <= h * z_pol - H * x_pol  (scaled center containment:
        # the piece collapses to zero when z_pol[y] == 0).
        for row in range(y.polytope.H.shape[0]):
            s_left = LinExpr()
            s_right = LinExpr()
            for k in range(s.Pi.shape[0]):
                s_left.add(Lambda[y][row, k])
            for k in range(s.n):
                s_right.add(y.polytope.H[row, k] * x_pol[y][k, 0])
            model.addConstr(s_left <= y.polytope.h[row, 0] * z_pol[y] - s_right)
    # Exactly one active piece; pieces reassemble the actual (x, G).
    model.addConstr(z_sum == 1)
    for row in range(s.n):
        model.addConstr(x_sum[row, 0] == x[row, 0])
        for column in range(s.n):
            model.addConstr(G_sum[row, column] == G[row, column])
    return z_pol
def _objective_function_for_delta_weight(D, delta_weight):
    """
    Solve the supplier-assignment model under the given per-(project,
    resource) delta weights and evaluate the resulting tardiness objective.

    :param D: problem data (supplier capacities, demands, costs, budget B,
        big-M, project list, ...)
    :param delta_weight: weights indexed via _delta_project_idx[j, r]
    :return: 1-tuple with the negated tardiness objective (trailing comma
        in the original return is preserved for caller compatibility)
    """
    m = Model("model_for_supplier_assignment")
    x = {}
    q = {}
    # x: binary assignment, q: shipped quantity, per (resource, supplier, project).
    for (r, s, p) in D.supplier_project_shipping:
        x[r, s, p] = m.addVar(vtype=GRB.BINARY, name="x_%s_%s_%s" % (r, s, p))
        q[r, s, p] = m.addVar(vtype=GRB.CONTINUOUS,
                              name="q_%s_%s_%s" % (r, s, p))
    # AT[j, r]: arrival time of resource r at project j.
    AT = {}
    for j in range(D.project_n):
        for k in sorted([r for r, p in D.resource_project_demand
                         if p == D.project_list[j]]):
            AT[j, k] = m.addVar(vtype=GRB.CONTINUOUS, name="AT_%s_%s" % (j, k))
    m.update()
    ## define constraints
    # constraint 20(3): supplier capacity.
    for (r, s) in D.resource_supplier_capacity:
        m.addConstr(quicksum(q[r, s, D.project_list[j]]
                             for j in range(D.project_n)),
                    GRB.LESS_EQUAL, D.resource_supplier_capacity[r, s],
                    name="constraint_3_resource_%s_supplier_%s" % (r, s))
    # constraint 21(4) 23(6): single supplier per demand; demand satisfied.
    for (r, p) in D.resource_project_demand:
        m.addConstr(quicksum(x[r, i, p] for i in D.resource_supplier_list[r]),
                    GRB.EQUAL, 1,
                    name="constraint_6_resource_%s_project_%s" % (r, p))
        m.addConstr(quicksum(q[r, i, p] for i in D.resource_supplier_list[r]),
                    GRB.GREATER_EQUAL, D.resource_project_demand[r, p],
                    name="constraint_4_resource_%s_project_%s" % (r, p))
    # constraint 22(5): quantity only flows on selected assignments.
    for (i, j, k) in q:
        m.addConstr(q[i, j, k], GRB.LESS_EQUAL, D.M * x[i, j, k],
                    name="constraint_5_resource_%s_supplier_%s_project_%s" % (i, j, k))
    # constraint 7: total cost within budget.
    expr = LinExpr()
    for (i, j, k) in q:
        expr = expr + D.c[i, j, k] * q[i, j, k]
    m.addConstr(expr, GRB.LESS_EQUAL, D.B, name="constraint_7")
    # constraint 8: arrival time dominated by release + shipping of the
    # chosen supplier.
    for j in range(D.project_n):
        p = D.project_list[j]
        project_resources = sorted(
            [r for (r, p_) in D.resource_project_demand.keys() if p_ == p])
        for r in project_resources:
            suppliers = D.resource_supplier_list[r]
            m.addConstr(
                quicksum(x[r, s, p] * (D.resource_supplier_release_time[r, s] +
                                       D.supplier_project_shipping[r, s, p])
                         for s in suppliers),
                GRB.LESS_EQUAL, AT[j, r],
                name="constraint_8_project_%d_resource_%s_deliver" % (j, r))
    m.update()
    # Objective: weighted arrival times.
    expr = LinExpr()
    for j in range(D.project_n):
        # Bug fix: the original reused the stale `p` left over from the
        # constraint-8 loop (always the LAST project), so every j summed over
        # the wrong resource set and indexed AT with keys that need not
        # exist. Rebind p to project j's name.
        p = D.project_list[j]
        for r in sorted([r for (r, p_) in D.resource_project_demand.keys()
                         if p_ == p]):
            expr.add(delta_weight[_delta_project_idx[j, r]] * AT[j, r])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    m.params.presolve = 1
    m.update()
    # Solve
    # m.params.presolve=0
    m.optimize()
    print(m.Status)
    # Extract the chosen assignments and positive arrival times.
    X_ = {}
    for (i, j, k) in D.supplier_project_shipping:
        v = m.getVarByName("x_%s_%s_%s" % (i, j, k))
        print(v)
        if v.X == 1:
            X_[i, j, k] = 1
    AT_ = {}
    for j, r in AT:
        val = AT[j, r].X
        if val > 0:
            AT_[j, r] = val
    return -_objective_function_for_tardiness(X_, AT_, D),
def optimizeMILP(elements, linklist, destinations, tasklist, time, federates):
    """Build and solve one round of the task-routing MILP over a 10-step horizon.

    Decision variables (all binary):
      pick[k]          -- task k is picked up at its source element
      store[k]         -- task k is stored instead of being routed this round
      trans[i][k][l]   -- task k traverses link l at horizon step i
      resolve[i][k][j] -- task k is resolved at element j at horizon step i

    Side effects: calls pickTask / transTask / resolveTask for the chosen
    actions and prints solver diagnostics.

    :return: (storedtasks, edges) -- tasks left in storage, and the
        (source name, destination name) pairs of every used link.
    """
    global storagepenalty, linkcost, epsilon, value, penalty, linkcapacity, elementcapacity
    print(time, [(task.id, task.element.name) for task in tasklist], [task.init for task in tasklist])
    print([(l.source.name, l.destin.name) for l in linklist])
    lp = Model('LP')
    steps = 10
    timesteps = range(time, time + steps)
    trans = []  # trans[t][i][l] transfer task i over link l at time t
    store = []  # store[i] store task i
    pick = []  # pick[i] if source i picks up the task
    resolve = []  # resolve[t][i][j] task i resolved at element j at time t
    J = LinExpr()  # objective expression (maximized at the end)
    # Storage variables: each stored task costs 'storagepenalty'.
    for i, task in enumerate(tasklist):
        store.insert(i, lp.addVar(vtype=GRB.BINARY))
        J.add(store[i], -1 * storagepenalty)
        r = LinExpr()
        r.add(store[i], 1)
        lp.addConstr(r <= 1)
    # Pick-up variables: tasks that arrived before 'time' must be picked up.
    for i, task in enumerate(tasklist):
        pick.append(lp.addVar(vtype=GRB.BINARY))
        J.add(pick[i], -1)
        element = task.element
        r = LinExpr()
        r.add(pick[i], 1)
        if task.init < time:
            lp.addConstr(r == 1)
        else:
            lp.addConstr(r <= 1)
    # Transfer / resolve variables plus per-step feasibility constraints.
    for i, t in enumerate(timesteps):
        trans.insert(i, [])
        resolve.insert(i, [])
        for k, task in enumerate(tasklist):
            trans[i].insert(k, [])
            resolve[i].insert(k, [])
            for j, e in enumerate(elements):
                resolve[i][k].insert(j, lp.addVar(vtype=GRB.BINARY))
                # Resolving at a destination earns 'value'; elsewhere 'penalty'.
                if e.name in destinations:
                    J.add(resolve[i][k][j], value)
                else:
                    J.add(resolve[i][k][j], penalty)
            # An already-expired task must be resolved immediately where it sits.
            if i == 0 and (task.expiration <= time):
                r = LinExpr()
                element = task.element
                j, e = next(((a, b) for a, b in enumerate(elements) if b.name == element.name))
                r.add(resolve[i][k][j], 1)
                lp.addConstr(r == 1)
            for l, link in enumerate(linklist):
                trans[i][k].insert(l, lp.addVar(vtype=GRB.BINARY))
                J.add(trans[i][k][l], -1 * epsilon)
                r = LinExpr()
                r.add(trans[i][k][l], 1)
                # Transfer allowed only if the link has spare capacity and does
                # not originate at a destination element.
                lp.addConstr(
                    r <= (1 if (task.size <= (link.capacity - link.size)
                                and link.source.name not in destinations) else 0))
                r.add(pick[k], -1)
                lp.addConstr(r <= 0)  # an unpicked task cannot be transferred
                r = LinExpr()
                # NOTE(review): a Python sum of Vars is fed to LinExpr.add with
                # no coefficient -- verify this builds the intended expression.
                r.add(sum(trans[i][k]))
                lp.addConstr(r <= 1)  # at most one outgoing link per step
                d = link.destin
                j, e = next(((a, b) for a, b in enumerate(elements) if b.name == d.name))
                r = LinExpr()
                r.add(resolve[i][k][j], 1)
                # Resolution only permitted at destination elements.
                lp.addConstr(r <= (1 if (d.name in destinations) else 0))
    # Flow conservation per (step, task, element).
    for i, t in enumerate(timesteps):
        for k, task in enumerate(tasklist):
            for j, element in enumerate(elements):
                inlinks = [(l, li) for l, li in enumerate(linklist) if li.destin.name == element.name]
                outlinks = [(l, li) for l, li in enumerate(linklist) if li.source.name == element.name]
                if i == 0 and element.name == task.element.name:
                    # Source element: a picked task either leaves on a link,
                    # resolves here, or goes to storage.
                    r = LinExpr()
                    for l, li in outlinks:
                        r.add(trans[i][k][l], -1)
                    r.add(resolve[i][k][j], -1)
                    r.add(store[k], -1)
                    r.add(pick[k], 1)
                    lp.addConstr(r == 0)
                elif element.name in destinations:
                    # Destination: everything that arrives must resolve.
                    r = LinExpr()
                    for l, li in inlinks:
                        r.add(trans[i][k][l], 1)
                    r.add(resolve[i][k][j], -1)
                    lp.addConstr(r == 0)
                else:
                    # Relay element: arrivals either resolve here or depart on
                    # an outgoing link at the next step.
                    r = LinExpr()
                    for l, li in inlinks:
                        r.add(trans[i][k][l], 1)
                    r.add(resolve[i][k][j], -1)
                    if i < len(timesteps) - 1:
                        for l, li in outlinks:
                            r.add(trans[i + 1][k][l], -1)
                    lp.addConstr(r == 0)
    # Each picked task is stored or resolved exactly once over the horizon.
    for k, task in enumerate(tasklist):
        r = LinExpr()
        r.add(pick[k], -1)
        r.add(store[k], 1)
        for j, element in enumerate(elements):
            for i, t in enumerate(timesteps):
                r.add(resolve[i][k][j], 1)
        lp.addConstr(r == 0)
    # Per-link throughput cap over the whole horizon.
    for l, li in enumerate(linklist):
        r = LinExpr()
        for k in range(len(tasklist)):
            for i in range(len(timesteps)):
                r.add(trans[i][k][l])
        lp.addConstr(r <= linkcapacity)
    # Per-element capacity cap.
    for j, e in enumerate(elements):
        r = LinExpr()
        # NOTE(review): the filter references 'task' (stale loop variable from
        # an earlier loop), not 't', and 'k' then indexes the *filtered* list
        # while pick/resolve are indexed by position in 'tasklist' -- this
        # looks buggy; confirm intent before relying on this constraint.
        for k, task in enumerate(
                [t for t in tasklist if e.name == task.element.name]):
            r.add(pick[k], 1)
            for i in range(len(timesteps)):
                for v in range(len(elements)):
                    r.add(resolve[i][k][v], -1)
        lp.addConstr(r <= elementcapacity)
    # A task may only move if its value covers the accumulated transfer costs.
    for k, task in enumerate(tasklist):
        r = LinExpr()
        fedtask = task.element.owner
        for i in range(len(timesteps)):
            for l, li in enumerate(linklist):
                r.add(trans[i][k][l], -1 * (costfunction(fedtask, li) + epsilon))
        r.add(task.getValue(time), 1)
        lp.addConstr(r >= 0)
    lp.setObjective(J, GRB.MAXIMIZE)
    lp.setParam('OutputFlag', False)
    lp.optimize()
    # Apply the solution: pick-ups first.
    # NOTE(review): iterates the module-global 'newtasks', not the 'tasklist'
    # parameter -- pick[i] only matches if newtasks is a prefix of tasklist;
    # confirm against the caller.
    for i, task in enumerate(newtasks):
        if pick[i].x > 0.5:
            pickTask(task, time)
    edges = []
    # Then transfers and resolutions.
    for i, t in enumerate(timesteps):
        for k, task in enumerate(tasklist):
            for l, link in enumerate(linklist):
                if trans[i][k][l].x > 0.5:
                    edges.append((link.source.name, link.destin.name))
                    print(i, task.id, task.element.name, (link.source.name, link.destin.name))
                    # Transfers over a federate's own links are free of linkcost.
                    if task.element.owner == link.owner:
                        transTask(task, link, 0, epsilon)
                    else:
                        transTask(task, link, linkcost, epsilon)
            for j, e in enumerate(elements):
                if resolve[i][k][j].x > 0.5:
                    print('time ', i, ' resolved task:', task.id, ' element ', j)
                    resolveTask(task, task.getValue(time))
    # Net-value bookkeeping (diagnostic only; 'net' is not used afterwards).
    for k, task in enumerate(tasklist):
        net = 0
        fedtask = task.element.owner
        for i in range(len(timesteps)):
            for l, li in enumerate(linklist):
                net -= trans[i][k][l].x * (costfunction(fedtask, li) + epsilon)
        net += task.getValue(time)
    # Tasks that were picked and stored but never resolved remain stored.
    storedtasks = []
    for k, task in enumerate(tasklist):
        if (pick[k].x and store[k].x) and not any([
                resolve[i][k][j].x
                for i, j in product(range(len(timesteps)), range(len(elements)))]):
            storedtasks.append(task)
    return storedtasks, edges
def __init__(self, n_vertices, edges, constraints, k, gamma,
             verbosity=0, symmetry_breaking=True, overlap=False, timeout=None):
    """Build the Gurobi MIP for clustering a graph into k connected clusters.

    Binary variable mvars[i][v] == 1 iff vertex v belongs to cluster i.  The
    objective maximizes the (scaled) size of the smallest cluster minus a
    gamma-weighted penalty for violated cannot-link constraints.

    :param n_vertices: number of vertices (validated by self.check_graph)
    :param edges: iterable of vertex pairs; membership tests use (v1, v2)
    :param constraints: iterable of (u, v, w) cannot-link triples with weight w
    :param k: number of clusters
    :param gamma: penalty scale, normalized by the total constraint weight
    :param verbosity: forwarded to Gurobi's OutputFlag
    :param symmetry_breaking: fix vertex 0 in cluster 0 and order cluster sizes
    :param overlap: if True, a vertex may belong to several clusters (>= 1
        instead of == 1 membership)
    :param timeout: stored on the instance; not applied to the model here
    """
    self.check_graph(n_vertices, edges)
    self.n_vertices = n_vertices
    self.edges = edges
    self.k = k
    self.verbosity = verbosity
    self.timeout = timeout
    self.create_graph()
    self.model = Model('graph_clustering')
    self.model.params.updatemode = 1
    # Membership variables: mvars[cluster][vertex].
    self.mvars = []
    for i in range(k):
        cvars = []
        for j in range(n_vertices):
            v = self.model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
            cvars.append(v)
        self.mvars.append(cvars)
    ineq_sense = GRB.GREATER_EQUAL if overlap else GRB.EQUAL
    # constraint: each vertex in exactly (or at least, if overlapping) one cluster
    for v in range(n_vertices):
        self.model.addConstr(
            quicksum([self.mvars[i][v] for i in range(k)]), ineq_sense, 1)
    # connectivity constraints: two non-adjacent vertices can share a cluster
    # only if the connectivity variables supplied by connectivity_vars allow it
    for v1 in range(n_vertices):
        for v2 in range(v1 + 1, n_vertices):
            if (v1, v2) in self.edges:
                continue
            for i in range(k):
                cvars = self.connectivity_vars(i, v1, v2)
                self.model.addConstr(self.mvars[i][v1] + self.mvars[i][v2],
                                     GRB.LESS_EQUAL, quicksum(cvars) + 1)
    # symmetry-breaking constraints: pin vertex 0 and order cluster sizes
    if symmetry_breaking:
        self.model.addConstr(self.mvars[0][0], GRB.EQUAL, 1)
        for i in range(2, k):
            self.model.addConstr(
                quicksum([self.mvars[i - 1][j] for j in range(n_vertices)]),
                GRB.LESS_EQUAL,
                quicksum([self.mvars[i][j] for j in range(n_vertices)]))
    obj_expr = LinExpr()
    # NOTE(review): raises ZeroDivisionError when 'constraints' is empty or its
    # weights sum to 0 -- confirm callers guarantee a positive total weight.
    wsum = sum(w for (_, _, w) in constraints)
    gamma = gamma / wsum
    # indicators for violation of cannot-link constraints: y == 1 is forced
    # when both endpoints sit in the same cluster i
    for (u, v, w) in constraints:
        for i in range(k):
            y = self.model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
            self.model.addConstr(
                y >= self.mvars[i][u] + self.mvars[i][v] - 1)
            obj_expr.add(y, -w * gamma)
    # size of smallest cluster (s is bounded above by every cluster's size)
    s = self.model.addVar(lb=0.0, ub=n_vertices, vtype=GRB.INTEGER)
    for i in range(k):
        self.model.addConstr(
            s <= quicksum([self.mvars[i][v] for v in range(n_vertices)]))
    # NOTE(review): under Python 2 these are integer divisions (0 for
    # 1 / n_vertices) -- confirm the intended interpreter version.
    s_coef = 1 / n_vertices if overlap else k / n_vertices
    obj_expr.add(s_coef * s)
    self.model.setObjective(obj_expr, GRB.MAXIMIZE)
    self.model.update()
    self.model.params.OutputFlag = self.verbosity
def _objective_function_for_delta_weight(D, delta_weight, d1, d2):
    """Solve the supplier-assignment MIP for the current delta weights, then
    update the weights from the tardiness sensitivities.

    Workflow:
      1. Build and solve the assignment model (x, q, AT as in the 2-argument
         variant), minimizing the delta-weighted sum of arrival times.
      2. Feed the resulting assignment X_ and positive arrival times AT_ to
         _objective_function_for_tardiness, obtaining the tardiness objective
         plus the per-(project, resource) sensitivities skj and per-project
         sensitivities sj.
      3. Multiplicatively update the weights:
         w'[j, r] = w[j, r] * (1 + d1 * (d2 + sj[j]) * skj[j, r]),
         normalize them, and log everything to the module-level datasets.

    :param D: problem-data bundle
    :param delta_weight: current weights keyed by (project index, resource)
    :param d1, d2: step-size parameters of the multiplicative update
    :return: the normalized new delta_weight dict
    """
    global _time_limit_per_model, _round, _pr_dataset, _tardiness_objective_dataset
    m = Model("model_for_supplier_assignment")
    m.setParam('OutputFlag', False)
    m.params.timelimit = _time_limit_per_model
    x = {}
    q = {}
    for (r, s, p) in D.supplier_project_shipping:
        x[r, s, p] = m.addVar(vtype=GRB.BINARY, name="x_%s_%s_%s" % (r, s, p))
        q[r, s, p] = m.addVar(vtype=GRB.CONTINUOUS, name="q_%s_%s_%s" % (r, s, p))
    AT = {}
    for j in range(D.project_n):
        for k in [r for r, p in D.resource_project_demand if p == D.project_list[j]]:
            AT[j, k] = m.addVar(vtype=GRB.CONTINUOUS, name="AT_%s_%s" % (j, k))
    m.update()
    ## define constraints
    # equation 2: supplier capacity limit
    for (r, s) in D.resource_supplier_capacity:
        m.addConstr(quicksum(q[r, s, D.project_list[j]] for j in range(D.project_n)),
                    GRB.LESS_EQUAL, D.resource_supplier_capacity[r, s],
                    name="constraint_3_resource_%s_supplier_%s" % (r, s))
    # constraint 21(4) 23(6)
    for (r, p) in D.resource_project_demand:
        # equation 5: exactly one supplier per (resource, project)
        m.addConstr(quicksum(x[r, i, p] for i in D.resource_supplier_list[r]),
                    GRB.EQUAL, 1,
                    name="constraint_6_resource_%s_project_%s" % (r, p))
        # equation 3: demand satisfaction
        m.addConstr(quicksum(q[r, i, p] for i in D.resource_supplier_list[r]),
                    GRB.GREATER_EQUAL, D.resource_project_demand[r, p],
                    name="constraint_4_resource_%s_project_%s" % (r, p))
    # constraint 22(5)
    for (i, j, k) in q:  # i resource, j supplier, k project
        # equation 4: big-M link between quantity and assignment
        m.addConstr(q[i, j, k], GRB.LESS_EQUAL, D.M * x[i, j, k],
                    name="constraint_5_resource_%s_supplier_%s_project_%s" % (i, j, k))
    # constraint 7: budget on total shipping cost
    shipping_cost_expr = LinExpr()
    for (i, j, k) in q:
        shipping_cost_expr.addTerms(D.c[i, j, k], q[i, j, k])
    # equation 6
    m.addConstr(shipping_cost_expr, GRB.LESS_EQUAL, D.B, name="constraint_7")
    # constraint 8 / equation 26: arrival time >= release + shipping time
    for j in range(D.project_n):
        p = D.project_list[j]
        project_resources = [r for (r, p_) in D.resource_project_demand.keys() if p_ == p]
        for r in project_resources:
            suppliers = D.resource_supplier_list[r]
            m.addConstr(
                quicksum(x[r, s, p] *
                         (D.resource_supplier_release_time[r, s] +
                          D.supplier_project_shipping[r, s, p]) for s in suppliers),
                GRB.LESS_EQUAL, AT[j, r],
                name="constraint_8_project_%d_resource_%s_deliver" % (j, r))
    m.update()
    # objective: delta-weighted sum of arrival times
    expr = LinExpr()
    for j in range(D.project_n):
        p = D.project_list[j]
        for r in [r for (r, p_) in D.resource_project_demand.keys() if p_ == p]:
            expr.add(delta_weight[j, r] * AT[j, r])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    # m.params.presolve = 1
    m.update()
    # Solve
    m.optimize()
    _exit_if_infeasible(m)
    m.write(join(_result_output_path, "round_%d_supplier_assign.lp" % _round))
    m.write(join(_result_output_path, "round_%d_supplier_assign.sol" % _round))
    with open(join(log_output_path, 'shipping_cost.txt'), 'a') as fout:
        fout.write('shipping cost: %f\n' % shipping_cost_expr.getValue())
    _logger.info('shipping cost: %f' % shipping_cost_expr.getValue())
    print('status', m.status)
    # Extract the chosen assignment ...
    X_ = {}
    for (i, j, k) in D.supplier_project_shipping:
        v = m.getVarByName("x_%s_%s_%s" % (i, j, k))
        if v.X == 1:
            X_[i, j, k] = 1
    # ... and the strictly positive arrival times.
    AT_ = {}
    for j, r in AT:
        val = AT[j, r].X
        if val > 0:
            AT_[j, r] = val
    tardiness_obj_val, skj, sj = _objective_function_for_tardiness(X_, AT_, D)
    # Multiplicative weight update driven by the sensitivities.
    new_delta_weight = {}
    for j, r in delta_weight.keys():
        new_delta_weight[j, r] = delta_weight[j, r] * (
            1 + d1 * (d2 + sj.get(j, 0)) * skj.get((j, r), 0))
        _logger.info(
            'r[%d,%s] = %f *(1+%f*(%f+%f)*%f) = %f' % (
                j, r, delta_weight[j, r], d1, d2, sj.get(j, 0),
                skj.get((j, r), 0), new_delta_weight[j, r]))
    _normalize(new_delta_weight)
    # Persist weights and diagnostics for this round.
    for j, r in new_delta_weight.keys():
        _weight_dataset.loc[_weight_dataset.shape[0]] = [_round, j, r, new_delta_weight[j, r]]
    for j in range(D.project_n):
        _pr_dataset.loc[_pr_dataset.shape[0]] = [_round, j, sj.get(j, 0)]
    _tardiness_objective_dataset.loc[_tardiness_objective_dataset.shape[0]] = \
        [_round, tardiness_obj_val]
    return new_delta_weight
def _objective_function_for_tardiness(x, AT, D):
    """Solve the overall review-sequencing MIP and return the weighted
    tardiness together with sensitivity information.

    First each project's activity-scheduling subproblem is solved (via
    optimize_single_project) to get its construction completion time CT[j]
    and its critical (project, resource) indicators skj, which are cached.
    Then the review-sequencing model (variables DT, TD, z) is solved to
    minimize total weighted tardiness.

    :param x: supplier assignment (used only to key the CT cache)
    :param AT: arrival times keyed by (project index, resource)
    :param D: problem-data bundle
    :return: (objective value, critical_project_resource dict,
        critical project duals from _sensitivity_analysis_for_tardiness)
    """
    global _last_CT, _last_x, _pool, _time_limit_per_model
    m = Model("Overall_Model")
    m.params.timelimit = _time_limit_per_model
    CT = {}
    project_suppliers = _get_project_suppliers_map(x, D.project_list)
    critical_project_resource = dict()
    # Solve every project's subproblem synchronously (the pool-based async
    # variant is disabled) and cache the results.
    for j in range(D.project_n):
        p = D.project_list[j]
        CT[j], skj = optimize_single_project(AT, j, D.project_list, D.project_activity, D.M)
        _put_CT(p, project_suppliers[p], CT[j])
        _put_historical_delta_weight_idx_map(p, project_suppliers[p], skj)
        critical_project_resource.update(skj)
    DT = {}
    TD = {}
    for j in range(D.project_n):
        ## project completion (DT) and tardiness (TD) variables
        DT[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="DT_%d" % j)
        TD[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="TD_%d" % j)
    # DT[-1] anchors the review sequence (virtual predecessor of the first job).
    DT[-1] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="DT_-1")
    ## review sequence z_ij: project i's review immediately precedes j's
    z = {}
    for i in range(D.project_n):
        for j in range(D.project_n):
            if i != j:
                z[i, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="z_%d_%d" % (i, j))
    for j in range(D.project_n):
        z[-1, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="z_%d_%d" % (-1, j))
    m.update();
    #### Add Constraint ####
    ## constraint 2 (equation 17): completion minus tardiness <= due date
    for j in range(D.project_n):
        m.addConstr(DT[j] - TD[j], GRB.LESS_EQUAL, D.DD[j],
                    name="constraint_2_project_%d" % j)
    ## constraint 13 (equation 12): review finishes after construction + review
    for j in range(D.project_n):
        m.addConstr(DT[j], GRB.GREATER_EQUAL, CT[j] + D.review_duration[j],
                    name="constraint_13_project_%d" % j)
    ## constraint 14 (equation 13): big-M sequencing of consecutive reviews
    for i in range(-1, D.project_n):
        for j in range(D.project_n):
            if i != j:
                m.addConstr(DT[j], GRB.GREATER_EQUAL,
                            DT[i] - D.M * (1 - z[i, j]) + D.review_duration[j],
                            name="constraint_14_project_%d_project_%d" % (i, j))
    ## constraint 15 (equation 14): exactly one predecessor per project
    for j in range(D.project_n):
        m.addConstr(quicksum(z[i, j] for i in range(-1, D.project_n) if i != j),
                    GRB.EQUAL, 1,
                    name="constraint_15_project_%d" % j)
    ## constraint 16 (equation 15): exactly one first review
    m.addConstr(quicksum(z[-1, j] for j in range(D.project_n)), GRB.EQUAL, 1,
                name="constraint_16")
    ## constraint 17 (equation 16): at most one successor per project
    for i in range(D.project_n):
        m.addConstr(quicksum(z[i, j] for j in range(D.project_n) if j != i),
                    GRB.LESS_EQUAL, 1,
                    name="constraint_17_project_%d" % i)
    m.update()
    # Set optimization objective - minimize weighted tardiness
    expr = LinExpr()
    for j in range(D.project_n):
        expr.add(D.w[j] * TD[j])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    # m.params.presolve = 1
    m.update()
    m.optimize()
    m.write(join(_result_output_path, 'round_%d_tardiness.sol' % _round))
    m.write(join(_result_output_path, 'round_%d_tardiness.lp' % _round))
    # Freeze the chosen sequence and run the LP sensitivity analysis on it.
    z_ = {}
    for i, j in z:
        z_[i, j] = z[i, j].X
    critical_projs = _sensitivity_analysis_for_tardiness(z_, CT, D)
    _tardiness_obj_trace.append(m.objVal)
    _gap_trace.append(m.MIPGap)
    return m.objVal, critical_project_resource, critical_projs
def _sensitivity_analysis_for_tardiness(z, CT, D):
    """Re-solve the review-scheduling problem with the sequence ``z`` fixed and
    return the dual prices of the completion-time constraints.

    With the ordering decisions frozen the model is a pure LP, so every
    "constraint_13" row carries a shadow price (Pi) measuring how sensitive the
    total weighted tardiness is to project j's completion time CT[j].

    :param z: fixed sequencing values keyed by (i, j), including the virtual
        predecessor i == -1
    :param CT: construction completion time per project index
    :param D: problem-data bundle (project_n, DD, review_duration, M, w)
    :return: dict mapping project index j -> dual of constraint_13_project_j
    """
    model = Model("model_for_sensitivity_analysis_for_tardiness")
    model.setParam('OutputFlag', False)

    n = D.project_n
    # Completion (DT) and tardiness (TD) variables; DT[-1] anchors the sequence.
    DT, TD = {}, {}
    for j in range(n):
        DT[j] = model.addVar(obj=0, vtype=GRB.CONTINUOUS, name="DT_%d" % j)
        TD[j] = model.addVar(obj=0, vtype=GRB.CONTINUOUS, name="TD_%d" % j)
    DT[-1] = model.addVar(obj=0, vtype=GRB.CONTINUOUS, name="DT_-1")
    model.update()

    # Tardiness definition: completion minus tardiness bounded by the due date.
    for j in range(n):
        model.addConstr(DT[j] - TD[j], GRB.LESS_EQUAL, D.DD[j],
                        name="constraint_2_project_%d" % j)
    # Completion lower bound: DT >= CT + review duration (duals read below).
    for j in range(n):
        model.addConstr(DT[j], GRB.GREATER_EQUAL, CT[j] + D.review_duration[j],
                        name="constraint_13_project_%d" % j)
    # Sequencing under the fixed z: the big-M term deactivates the link
    # whenever z[i, j] == 0.
    for i in range(-1, n):
        for j in range(n):
            if i == j:
                continue
            model.addConstr(DT[j], GRB.GREATER_EQUAL,
                            DT[i] - D.M * (1 - z[i, j]) + D.review_duration[j],
                            name="constraint_14_project_%d_project_%d" % (i, j))
    model.update()

    # Minimize total weighted tardiness.
    model.setObjective(quicksum(D.w[j] * TD[j] for j in range(n)), GRB.MINIMIZE)
    model.update()
    model.update()
    model.optimize()

    # Collect the Pi of every constraint_13 row; the project index is encoded
    # as the trailing "_<j>" of the constraint name.
    duals = {}
    for constr in model.getConstrs():
        name = constr.ConstrName
        if name.startswith('constraint_13'):
            duals[int(name.split('_')[-1])] = constr.Pi
    return duals
def __objective_function(self, x, q, p_changed=None):
    """Evaluate the overall weighted-tardiness objective for assignment x.

    When ``p_changed`` is given, only that project's completion time is
    recomputed (reusing ``self.last_CT`` for the rest); otherwise all project
    subproblems are solved in parallel through ``self.pool``.  The review
    sequencing MIP (variables DT, TD, z) is then solved on top of the
    completion times.

    :param x: supplier assignment
    :param q: shipped quantities (passed for interface symmetry; not read here)
    :param p_changed: optional project whose assignment changed since last call
    :return: (objective value, index of the project with the largest weighted
        tardiness term)
    """
    m = Model("Overall_Model")
    if p_changed is not None:
        # Incremental path: refresh only the changed project's completion time.
        j0 = self.project_list.index(p_changed)
        # NOTE(review): CT aliases self.last_CT here, so the cached dict is
        # mutated in place before the deepcopy below -- confirm intended.
        CT = self.last_CT
        CT[j0] = self.optmize_single_project(
            x, j0, self.project_list, self.project_activity,
            self.resource_supplier_list, self.resource_supplier_release_time,
            self.supplier_project_shipping, self.M, self.output_dir)
    else:
        # Full path: solve every project subproblem in parallel.
        CT = {}
        CT_ASYNC = dict()
        for j in range(self.project_n):
            ## solve individual model to get each project's completion date
            CT_ASYNC[j] = self.pool.apply_async(
                HeuristicParallelModel.optmize_single_project,
                (x, j, self.project_list, self.project_activity,
                 self.resource_supplier_list,
                 self.resource_supplier_release_time,
                 self.supplier_project_shipping, self.M, self.output_dir))
        for j in range(self.project_n):
            CT[j] = CT_ASYNC[j].get()
    self.last_CT = deepcopy(CT)
    DT = {}
    TD = {}
    for j in range(self.project_n):
        ## project completion (DT) and tardiness (TD) variables
        DT[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT%d)" % j)
        TD[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(TD%d)" % j)
    # Virtual predecessor anchoring the review sequence.
    DT[-1] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT-1)")
    ## Review sequence z_ij
    z = {}
    for i in range(self.project_n):
        for j in range(self.project_n):
            if i != j:
                z[i, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="(z%d,%d)" % (i, j))
    for j in range(self.project_n):
        z[-1, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="(z%d,%d)" % (-1, j))
    m.update();
    #### Add Constraint ####
    ## Constraint 2: completion minus tardiness bounded by due date
    for j in range(self.project_n):
        m.addConstr(DT[j] - TD[j], GRB.LESS_EQUAL, self.DD[j],
                    name="constraint_2_project_%d" % j)
    ## Constraint 13: review after construction completion + review duration
    for j in range(self.project_n):
        m.addConstr(DT[j], GRB.GREATER_EQUAL, CT[j] + self.review_duration[j],
                    name="constraint_13_project_%d" % j)
    ## Constraint 14: big-M sequencing of consecutive reviews
    for i in range(-1, self.project_n):
        for j in range(self.project_n):
            if i != j:
                m.addConstr(DT[j], GRB.GREATER_EQUAL,
                            DT[i] - self.M * (1 - z[i, j]) + self.review_duration[j],
                            name="constraint_14_project_%d_project_%d" % (i, j))
    ## Constraint 15: exactly one predecessor per project
    for j in range(self.project_n):
        m.addConstr(quicksum(z[i, j] for i in range(-1, self.project_n) if i != j),
                    GRB.EQUAL, 1,
                    name="constraint_15_project_%d" % j)
    ## Constraint 16: exactly one first review
    m.addConstr(quicksum(z[-1, j] for j in range(self.project_n)), GRB.EQUAL, 1,
                name="constraint_16")
    ## Constraint 17: at most one successor per project
    for i in range(self.project_n):
        m.addConstr(quicksum(z[i, j] for j in range(self.project_n) if j != i),
                    GRB.LESS_EQUAL, 1,
                    name="constraint_17_project_%d" % i)
    m.update()
    # Set optimization objective - minimize weighted tardiness
    expr = LinExpr()
    for j in range(self.project_n):
        expr.add(self.w[j] * TD[j])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    m.params.presolve = 1
    m.update()
    m.optimize()
    m.write(join(self.output_dir, "heuristic_whole.lp"))
    m.write(join(self.output_dir, "heuristic_whole.sol"))
    self.obj_value_trace.append(m.objVal)
    return m.objVal, argmax([self.w[j] * TD[j].X for j in range(self.project_n)])
def optimizeMILP(elements, linklist, destinations, storedtasks, newtasks, time,
                 federates, edgePriceDict, solutionObj):
    """Federated variant of the task-routing MILP over a 10-step horizon.

    Same variable structure as the simpler optimizeMILP (pick / store /
    trans / resolve, all binary) but link costs come from ``edgePriceDict``
    via costfunction, task rewards use ``task.maxvalue`` / ``task.penalty``,
    and the movable value is capped by the federate's bid in
    ``solutionObj.fedBidDict``.

    Side effects: calls pickTask / transTask / resolveTask for the chosen
    actions and records the used edges on ``solutionObj.sourceEdgeDict``.

    :return: solutionObj (mutated in place)
    """
    global storagepenalty, epsilon, linkcapacity, elementcapacity
    tasklist = storedtasks + newtasks
    lp = Model('LP')
    steps = 10
    timesteps = range(time, time + steps)
    trans = []  # trans[t][i][l] transfer task i over link l at time t
    store = []  # store[i] store task i
    pick = []  # pick[i] if source i picks up the task
    resolve = []  # resolve[t][i][j] task i resolved at element j at time t
    J = LinExpr()  # objective expression (maximized at the end)
    # Storage variables: each stored task costs 'storagepenalty'.
    for i, task in enumerate(tasklist):
        store.insert(i, lp.addVar(vtype=GRB.BINARY))
        J.add(store[i], -1 * storagepenalty)
        r = LinExpr()
        r.add(store[i], 1)
        lp.addConstr(r <= 1)
    # Pick-up variables: tasks that arrived before 'time' must be picked up.
    for i, task in enumerate(tasklist):
        pick.append(lp.addVar(vtype=GRB.BINARY))
        J.add(pick[i], -1)
        element = task.element
        r = LinExpr()
        r.add(pick[i], 1)
        if task.init < time:
            lp.addConstr(r == 1)
        else:
            lp.addConstr(r <= 1)
    # Transfer / resolve variables plus per-step feasibility constraints.
    for i, t in enumerate(timesteps):
        trans.insert(i, [])
        resolve.insert(i, [])
        for k, task in enumerate(tasklist):
            trans[i].insert(k, [])
            resolve[i].insert(k, [])
            for j, e in enumerate(elements):
                resolve[i][k].insert(j, lp.addVar(vtype=GRB.BINARY))
                # Resolving at a destination earns the task's full value;
                # elsewhere its penalty.
                if e.name in destinations:
                    J.add(resolve[i][k][j], task.maxvalue)
                else:
                    J.add(resolve[i][k][j], task.penalty)
            # An already-expired task must be resolved immediately where it sits.
            if i == 0 and (task.expiration <= time):
                r = LinExpr()
                element = task.element
                j, e = next(((a, b) for a, b in enumerate(elements) if b.name == element.name))
                r.add(resolve[i][k][j], 1)
                lp.addConstr(r == 1)
            for l, link in enumerate(linklist):
                trans[i][k].insert(l, lp.addVar(vtype=GRB.BINARY))
                J.add(trans[i][k][l], -1 * epsilon)
                r = LinExpr()
                r.add(trans[i][k][l], 1)
                # Transfer allowed only if the link has spare capacity and does
                # not originate at a destination element.
                lp.addConstr(
                    r <= (1 if (task.size <= (link.capacity - link.size)
                                and link.source.name not in destinations) else 0))
                r.add(pick[k], -1)
                lp.addConstr(r <= 0)  # an unpicked task cannot be transferred
                r = LinExpr()
                # NOTE(review): a Python sum of Vars is fed to LinExpr.add with
                # no coefficient -- verify this builds the intended expression.
                r.add(sum(trans[i][k]))
                lp.addConstr(r <= 1)  # at most one outgoing link per step
                d = link.destin
                j, e = next(((a, b) for a, b in enumerate(elements) if b.name == d.name))
                r = LinExpr()
                r.add(resolve[i][k][j], 1)
                # Resolution only permitted at destination elements.
                lp.addConstr(r <= (1 if (d.name in destinations) else 0))
    # Flow conservation per (step, task, element).
    for i, t in enumerate(timesteps):
        for k, task in enumerate(tasklist):
            for j, element in enumerate(elements):
                inlinks = [(l, li) for l, li in enumerate(linklist) if li.destin.name == element.name]
                outlinks = [(l, li) for l, li in enumerate(linklist) if li.source.name == element.name]
                if i == 0 and element.name == task.element.name:
                    # Source element: a picked task either leaves on a link,
                    # resolves here, or goes to storage.
                    r = LinExpr()
                    for l, li in outlinks:
                        r.add(trans[i][k][l], -1)
                    r.add(resolve[i][k][j], -1)
                    r.add(store[k], -1)
                    r.add(pick[k], 1)
                    lp.addConstr(r == 0)
                elif element.name in destinations:
                    # Destination: everything that arrives must resolve.
                    r = LinExpr()
                    for l, li in inlinks:
                        r.add(trans[i][k][l], 1)
                    r.add(resolve[i][k][j], -1)
                    lp.addConstr(r == 0)
                else:
                    # Relay element: arrivals either resolve here or depart on
                    # an outgoing link at the next step.
                    r = LinExpr()
                    for l, li in inlinks:
                        r.add(trans[i][k][l], 1)
                    r.add(resolve[i][k][j], -1)
                    if i < len(timesteps) - 1:
                        for l, li in outlinks:
                            r.add(trans[i + 1][k][l], -1)
                    lp.addConstr(r == 0)
    # Each picked task is stored or resolved exactly once over the horizon.
    for k, task in enumerate(tasklist):
        r = LinExpr()
        r.add(pick[k], -1)
        r.add(store[k], 1)
        for j, element in enumerate(elements):
            for i, t in enumerate(timesteps):
                r.add(resolve[i][k][j], 1)
        lp.addConstr(r == 0)
    # Per-link throughput cap over the whole horizon.
    for l, li in enumerate(linklist):
        r = LinExpr()
        for k in range(len(tasklist)):
            for i in range(len(timesteps)):
                r.add(trans[i][k][l])
        lp.addConstr(r <= linkcapacity)
    # Per-element capacity cap.
    for j, e in enumerate(elements):
        r = LinExpr()
        # NOTE(review): the filter references 'task' (stale loop variable from
        # an earlier loop), not 't', and 'k' then indexes the *filtered* list
        # while pick/resolve are indexed by position in 'tasklist' -- this
        # looks buggy; confirm intent before relying on this constraint.
        for k, task in enumerate(
                [t for t in tasklist if e.name == task.element.name]):
            r.add(pick[k], 1)
            for i in range(len(timesteps)):
                for v in range(len(elements)):
                    r.add(resolve[i][k][v], -1)
        lp.addConstr(r <= elementcapacity)
    # A task may only move if the smaller of its value and the federate's bid
    # covers the accumulated transfer costs.
    for k, task in enumerate(tasklist):
        r = LinExpr()
        fedtask = task.element.owner
        for i in range(len(timesteps)):
            for l, li in enumerate(linklist):
                r.add(trans[i][k][l], -1 * costfunction(fedtask, li, edgePriceDict))
        r.add(min(task.getValue(time), solutionObj.fedBidDict[fedtask.name][1]), 1)
        lp.addConstr(r >= 0)
    lp.setObjective(J, GRB.MAXIMIZE)
    lp.setParam('OutputFlag', False)
    lp.optimize()
    # Apply the solution: pick-ups first.
    # NOTE(review): pick is indexed over tasklist = storedtasks + newtasks, but
    # i here enumerates newtasks only, so pick[i] addresses the storedtasks
    # prefix -- looks like an off-by-offset bug; confirm intent.
    for i, task in enumerate(newtasks):
        if pick[i].x > 0.5:
            pickTask(task, time)
    edges = []
    sourceEdgeDict = defaultdict(list)
    # Then transfers and resolutions.
    for i, t in enumerate(timesteps):
        for k, task in enumerate(tasklist):
            for l, link in enumerate(linklist):
                if trans[i][k][l].x > 0.5:
                    edges.append((link.source.name, link.destin.name))
                    sourceEdgeDict[task.element.name].append(
                        (link.source.name, link.destin.name))
                    # Transfers over a federate's own links cost only epsilon.
                    if task.element.owner == link.owner:
                        transTask(task, link, epsilon, solutionObj)
                    else:
                        transTask(task, link,
                                  costfunction(task.element.owner, link, edgePriceDict),
                                  solutionObj)
            for j, e in enumerate(elements):
                if resolve[i][k][j].x > 0.5:
                    resolveTask(task, task.getValue(time), solutionObj)
    # Net-value bookkeeping (diagnostic only; 'net' is not used afterwards).
    for k, task in enumerate(tasklist):
        net = 0
        fedtask = task.element.owner
        for i in range(len(timesteps)):
            for l, li in enumerate(linklist):
                net -= trans[i][k][l].x * costfunction(fedtask, li, edgePriceDict)
        net += task.getValue(time)
    # Tasks that were picked and stored but never resolved remain stored
    # (rebinds the 'storedtasks' parameter name locally).
    storedtasks = []
    for k, task in enumerate(tasklist):
        if (pick[k].x and store[k].x) and not any([
                resolve[i][k][j].x
                for i, j in product(range(len(timesteps)), range(len(elements)))]):
            storedtasks.append(task)
    solutionObj.sourceEdgeDict = sourceEdgeDict
    return solutionObj
def _objective_function_for_tardiness(x, AT, D):
    """Compute the minimal total weighted tardiness for assignment x.

    Per-project completion times CT[j] come either from the memo cache
    (keyed by project and its supplier set) or from a fresh
    optmize_single_project solve; the review-sequencing MIP (variables DT,
    TD, z) is then solved on top of them.

    :param x: supplier assignment (used to key the CT cache via
        _get_project_suppliers_map)
    :param AT: arrival times keyed by (project index, resource)
    :param D: problem-data bundle
    :return: the optimal weighted-tardiness objective value (also appended
        to _tardiness_obj_trace)
    """
    global _last_CT, _last_x, _pool
    m = Model("Overall_Model")
    CT = {}
    CT_ASYNC = dict()
    project_suppliers = _get_project_suppliers_map(x, D.project_list)
    for j in range(D.project_n):
        p = D.project_list[j]
        history_CT = _get_CT(p, project_suppliers[p])
        if history_CT is None:
            CT[j] = optmize_single_project(AT, j, D.project_list, D.project_activity, D.M)
            # BUG FIX: cache the freshly computed completion time.  Previously
            # _put_CT was only called in the CT_ASYNC drain loop below, but the
            # async submission is disabled, so the memo table never filled and
            # _get_CT above always missed.
            _put_CT(p, project_suppliers[p], CT[j])
        else:
            CT[j] = history_CT
    # Drain any asynchronously computed subproblems (currently always empty
    # because the pool submission path is disabled).
    for j in CT_ASYNC:
        CT[j] = CT_ASYNC[j].get()
        p = D.project_list[j]
        _put_CT(p, project_suppliers[p], CT[j])
    DT = {}
    TD = {}
    for j in range(D.project_n):
        ## project completion (DT) and tardiness (TD) variables
        DT[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT%d)" % j)
        TD[j] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(TD%d)" % j)
    # Virtual predecessor anchoring the review sequence.
    DT[-1] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(DT-1)")
    ## Review sequence z_ij: project i's review immediately precedes j's
    z = {}
    for i in range(D.project_n):
        for j in range(D.project_n):
            if i != j:
                z[i, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="(z%d,%d)" % (i, j))
    for j in range(D.project_n):
        z[-1, j] = m.addVar(obj=0, vtype=GRB.BINARY, name="(z%d,%d)" % (-1, j))
    m.update()
    #### Add Constraint ####
    ## Constraint 2: completion minus tardiness bounded by due date
    for j in range(D.project_n):
        m.addConstr(DT[j] - TD[j], GRB.LESS_EQUAL, D.DD[j],
                    name="constraint_2_project_%d" % j)
    ## Constraint 13: review finishes after construction + review duration
    for j in range(D.project_n):
        m.addConstr(DT[j], GRB.GREATER_EQUAL, CT[j] + D.review_duration[j],
                    name="constraint_13_project_%d" % j)
    ## Constraint 14: big-M sequencing of consecutive reviews
    for i in range(-1, D.project_n):
        for j in range(D.project_n):
            if i != j:
                m.addConstr(DT[j], GRB.GREATER_EQUAL,
                            DT[i] - D.M * (1 - z[i, j]) + D.review_duration[j],
                            name="constraint_14_project_%d_project_%d" % (i, j))
    ## Constraint 15: exactly one predecessor per project
    for j in range(D.project_n):
        m.addConstr(quicksum(z[i, j] for i in range(-1, D.project_n) if i != j),
                    GRB.EQUAL, 1,
                    name="constraint_15_project_%d" % j)
    ## Constraint 16: exactly one first review
    m.addConstr(quicksum(z[-1, j] for j in range(D.project_n)), GRB.EQUAL, 1,
                name="constraint_16")
    ## Constraint 17: at most one successor per project
    for i in range(D.project_n):
        m.addConstr(quicksum(z[i, j] for j in range(D.project_n) if j != i),
                    GRB.LESS_EQUAL, 1,
                    name="constraint_17_project_%d" % i)
    m.update()
    # Set optimization objective - minimize weighted tardiness
    expr = LinExpr()
    for j in range(D.project_n):
        expr.add(D.w[j] * TD[j])
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    m.params.presolve = 1
    m.update()
    m.optimize()
    _tardiness_obj_trace.append(m.objVal)
    return m.objVal
def optmize_single_project(AT, j, project_list, project_activity, M):
    """Schedule the activities of project j and return its minimal makespan.

    Builds a disjunctive scheduling model: continuous start times ST per
    activity, binary ordering variables y for every pair of activities that
    share a renewable (rk) resource, and a makespan variable CT.

    :param AT: resource arrival times keyed by (project index, resource name)
    :param j: project index into project_list
    :param project_list: ordered list of project identifiers
    :param project_activity: maps project id -> activity graph (networkx-style;
        nodes carry 'duration', 'resources' and 'rk_resources' attributes)
    :param M: big-M constant for the disjunctive constraints
    :return: optimal makespan (m.objVal)
    """
    m = Model("SingleProject_%d" % j)
    m.setParam(GRB.Param.Method, 0)  # force primal simplex
    m.update()
    #### Create variables ####
    project = project_list[j]
    ## Makespan variable.
    CT = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(CT%d)" % j)
    ## Activity start times.
    ST = {}
    project_activities = project_activity[project]
    for row in project_activities.nodes():
        ST[row] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(ST%d,%s)" % (j, row))
    ## Ordering variables for activity pairs sharing a renewable resource.
    y = {}
    for activity_i in project_activities.nodes():
        for activity_j in project_activities.nodes():
            if activity_i != activity_j and len(list(
                    set(project_activities.node[activity_i]['rk_resources']).intersection(
                        project_activities.node[activity_j]['rk_resources']))) > 0:
                y[activity_i, activity_j] = m.addVar(
                    obj=0, vtype=GRB.BINARY,
                    name="(y%d,%s,%s)" % (j, activity_i, activity_j))
    m.update()
    #### Create constraints ####
    # (Constraints 2-7 of the full formulation are handled by the annealing
    # wrapper, not in this subproblem.)
    ## Constraint 8: an activity cannot start before its resources arrive.
    for a in project_activities.nodes():
        for r in project_activities.node[a]['resources']:
            m.addConstr(AT[j, r], GRB.LESS_EQUAL, ST[a],
                        name="constraint_8_project_%d_activity_%s_resource_%s" % (j, a, r))
    ## Constraint 9: precedence along the activity graph edges.
    for row1, row2 in project_activities.edges():
        m.addConstr(ST[row1] + project_activities.node[row1]['duration'],
                    GRB.LESS_EQUAL, ST[row2],
                    name="constraint_9_project_%d_activity_%s_activity_%s" % (j, row1, row2))
    ## Constraints 10, 11: big-M disjunction for shared renewable resources.
    for row1 in project_activities.nodes():
        for row2 in project_activities.nodes():
            if row1 != row2 and len(list(
                    set(project_activities.node[row1]['rk_resources']).intersection(
                        project_activities.node[row2]['rk_resources']))) > 0:
                m.addConstr(
                    ST[row1] + project_activities.node[row1]['duration'] - M * (1 - y[row1, row2]),
                    GRB.LESS_EQUAL, ST[row2],
                    name="constraint_10_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                m.addConstr(
                    ST[row2] + project_activities.node[row2]['duration'] - M * (y[row1, row2]),
                    GRB.LESS_EQUAL, ST[row1],
                    name="constraint_11_project_%d_activity_%s_activity_%s" % (j, row1, row2))
    ## Constraint 12: CT is the makespan (>= every activity's finish time).
    for row in project_activities.nodes():
        m.addConstr(CT, GRB.GREATER_EQUAL,
                    ST[row] + project_activities.node[row]['duration'],
                    name="constraint_12_project_%d_activity_%s" % (j, row))
    m.update()
    # Set optimization objective - minimize completion time.
    expr = LinExpr()
    expr.add(CT)
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    m.params.presolve = 1
    m.update()
    # Solve
    m.optimize()
    print(m.status == GRB.OPTIMAL)
    # Log RHS sensitivity ranges for the resource-arrival constraints.
    # FIX: removed a dead `c.getAttr(GRB.Attr.SARHSLow)` call whose result was
    # discarded; the attributes are read directly below.
    # NOTE(review): SARHSLow/SARHSUp are LP sensitivity attributes and are
    # unavailable when the model solved as a MIP (nonempty y) -- confirm the
    # solved model is effectively an LP here.
    for c in m.getConstrs():
        if c.ConstrName.startswith('constraint_8_project'):
            logging.info('%s shadow price (%f,%f)' % (c.ConstrName, c.SARHSLow, c.SARHSUp))
    return m.objVal
def populate_master(data, open_arcs=None):
    """
    Function that populates the Benders Master problem
    :param data: Problem data structure
    :param open_arcs: If given, it is a MIP start feasible solution
    :rtype: Gurobi model object

    Note: this module targets Python 2 (`xrange`).
    """
    master = Model('master-model')
    arcs, periods = xrange(data.arcs.size), xrange(data.periods)
    commodities = xrange(data.commodities)
    graph, origins, destinations = data.graph, data.origins, data.destinations
    variables = np.empty(shape=(data.periods, data.arcs.size), dtype=object)
    bin_vars_idx = np.empty_like(variables, dtype=int)
    continuous_variables = np.empty(shape=(len(periods), len(commodities)), dtype=object)
    cont_vars_idx = np.empty_like(continuous_variables, dtype=int)
    start_given = open_arcs is not None
    count = 0

    # length of shortest path, shortest path itself
    arc_com, arc_obj = [], []
    lbs = [
        shortest_path_length(graph, origins[com], destinations[com], 'weight')
        for com in commodities
    ]
    sps = [
        shortest_path(graph, origins[com], destinations[com], 'weight')
        for com in commodities
    ]
    # re-solve the sp with each arc of the path penalized in turn; record the
    # arc whose removal causes the biggest detour and that detour's length
    for com in commodities:
        incr, best_arc = 0., 0
        for n1, n2 in zip(sps[com], sps[com][1:]):
            weight = graph[n1][n2]['weight']
            graph[n1][n2]['weight'] = 10000. * weight
            spl = shortest_path_length(graph, origins[com], destinations[com], 'weight')
            if spl > incr:
                incr = spl
                best_arc = graph[n1][n2]['arc_id']
            graph[n1][n2]['weight'] = weight
        arc_com.append(best_arc)
        # BUG FIX: previously appended `spl` (the *last* detour length, not the
        # one matching best_arc). The strengthening cut below needs the detour
        # length of the recorded best arc, i.e. the tracked maximum `incr`.
        arc_obj.append(incr)

    # Add variables
    for period in periods:
        for arc in arcs:
            # Binary arc variables
            variables[period, arc] = master.addVar(vtype=GRB.BINARY,
                                                   obj=data.fixed_cost[period, arc],
                                                   name='arc_open{}_{}'.format(period, arc))
            bin_vars_idx[period, arc] = count
            count += 1
        for com in commodities:
            # Shortest-path length times demand is a valid lower bound on eta.
            lb = lbs[com] * data.demand[period, com]
            # Continuous flow_cost variables (eta)
            continuous_variables[period, com] = master.addVar(
                lb=lb, obj=1., vtype=GRB.CONTINUOUS,
                name='flow_cost{}'.format((period, com)))
            cont_vars_idx[period, com] = count
            count += 1
    master.update()

    # If feasible solution is given, use it as a (hint) start
    if start_given:
        for period in periods:
            for arc in arcs:
                # variables[period, arc].start = open_arcs[period, arc]
                variables[period, arc].VarHintVal = open_arcs[period, arc]
                variables[period, arc].VarHintPri = 1

    # Add constraints
    # Add Origin - Destination Cuts for each Commodity
    cuts_org, cuts_dest = set(), set()
    for commodity in commodities:
        arc_origin = data.origins[commodity]
        arc_destination = data.destinations[commodity]
        if arc_origin not in cuts_org:
            out_origin = get_2d_index(data.arcs, data.nodes)[0] - 1 == arc_origin
            master.addConstr(lhs=np.sum(variables[0, out_origin]), rhs=1.,
                             sense=GRB.GREATER_EQUAL,
                             name='origins_c{}'.format(commodity))
            cuts_org.add(arc_origin)
        if arc_destination not in cuts_dest:
            in_dest = get_2d_index(data.arcs, data.nodes)[1] - 1 == arc_destination
            master.addConstr(lhs=np.sum(variables[0, in_dest]), rhs=1.,
                             sense=GRB.GREATER_EQUAL,
                             name='destinations_c{}'.format(commodity))
            cuts_dest.add(arc_destination)

    # An arc can open at most once over the horizon (SOS1 across periods)
    for arc in arcs:
        master.addSOS(GRB.SOS_TYPE1, variables[:, arc].tolist(), list(periods)[::-1])

    # Add extra constraints for lower bound improvement: if the critical arc
    # of a commodity is closed through period t, eta must cover the detour.
    for com in commodities:
        arc = arc_com[com]
        base_coeffs = lbs[com] - arc_obj[com]
        for period in periods:
            lhs = LinExpr()
            coeffs = [
                cf * data.demand[period, com]
                for cf in [base_coeffs] * (period + 1)
            ]
            lhs.addTerms(coeffs, variables[:period + 1, arc].tolist())
            lhs.add(-continuous_variables[period, com])
            lhs.addConstant(arc_obj[com] * data.demand[period, com])
            master.addConstr(lhs, sense=GRB.LESS_EQUAL, rhs=0,
                             name='strengthening_{}{}'.format(period, com))

    master.params.LazyConstraints = 1
    # Find feasible solutions quickly, works better
    master.params.TimeLimit = 7200
    master.params.threads = 2
    master.params.BranchDir = 1
    # Store the variables inside the model, we cannot access them later!
    master._variables = np.array(master.getVars())
    master._cont_vars_idx = cont_vars_idx
    master._bin_vars_idx = bin_vars_idx
    return master
def construct_model(self):
    """Assemble the course-scheduling MIP.

    Creates one binary variable per (student, course, semester) triple,
    adds class-size, first-semester, load, demand and prerequisite
    constraints, and sets an objective that penalizes unmet demand.
    """
    self.m = Model('mip1.log')
    model = self.m

    # Decision variables: 1 iff the student takes the course that semester.
    self.solution_matrix = {}
    for student in self.student_ids:
        for course in self.course_ids:
            for semester in self.semester_ids:
                self.solution_matrix[student, course, semester] = model.addVar(
                    vtype=GRB.BINARY, name='{}_{}_{}'.format(student, course, semester))
    model.update()
    assign = self.solution_matrix

    # Constraint #1: class availability and size. A section that is not
    # offered gets zero enrollment; an offered one is capped at 300.
    for course in self.course_ids:
        for semester in self.semester_ids:
            enrollment = LinExpr()
            for student in self.student_ids:
                enrollment.add(assign[student, course, semester])
            if self.is_course_offered(course, semester):
                model.addConstr(enrollment, GRB.LESS_EQUAL, 300,
                                'max_size_{}_{}'.format(course, semester))
            else:
                model.addConstr(enrollment, GRB.EQUAL, 0,
                                'max_size_{}_{}'.format(course, semester))

    # Constraint #2: a course that has a prerequisite cannot be taken in
    # semester 1 (there is no earlier semester to satisfy it).
    for student in self.student_ids:
        for prereq in self.dependencies:
            gated = LinExpr()
            gated.add(assign[student, prereq[1], 1])
            model.addConstr(gated, GRB.EQUAL, 0,
                            'fs_prereq_{}_{}'.format(student, prereq))

    # Constraint #3: at most two courses per student per semester.
    for student in self.student_ids:
        for semester in self.semester_ids:
            load = LinExpr()
            for course in self.course_ids:
                load.add(assign[student, course, semester])
            model.addConstr(load, GRB.LESS_EQUAL, 2,
                            'max_load_{}_{}'.format(student, semester))

    # Constraint #4: a demanded course is taken exactly once (in some semester).
    for student in self.student_ids:
        for course in self.course_ids:
            if any(self.student_demand[student, course, semester]
                   for semester in self.semester_ids):
                taken = LinExpr()
                for semester in self.semester_ids:
                    taken.add(assign[student, course, semester])
                model.addConstr(taken, GRB.EQUAL, 1,
                                'student_demand_{}_{}'.format(student, course))

    # Constraint #5: prerequisite ordering — by each semester boundary the
    # prerequisite must have been taken at least as often as the dependent
    # course is taken the following semester.
    for student in self.student_ids:
        for prereq in self.dependencies:
            ordering = LinExpr()
            for semester in sorted(self.semester_ids)[:-1]:
                ordering.add(assign[student, prereq[0], semester], 1.0)
                ordering.add(assign[student, prereq[1], semester + 1], -1.0)
            model.addConstr(ordering, GRB.GREATER_EQUAL, 0,
                            'prereq_{}_{}_{}'.format(student, prereq[0], prereq[1]))

    # Objective: minimize the count of demanded (student, course, semester)
    # slots that are NOT satisfied.
    unmet = LinExpr()
    for student in self.student_ids:
        for course in self.course_ids:
            for semester in self.semester_ids:
                if self.student_demand[student, course, semester]:
                    unmet.add(1 - assign[student, course, semester])
    model.setObjective(unmet, GRB.MINIMIZE)
def solve_problem(self, xs, mus, c, k):
    """Optimize via gurobi.

    Build and solve the constrained optimization problem at the basis
    of the fuzzy learning procedure using the gurobi API.

    :param xs: objects in training set.
    :type xs: iterable
    :param mus: membership values for the objects in `xs`.
    :type mus: iterable
    :param c: constant managing the trade-off in joint radius/error
      optimization.
    :type c: float
    :param k: kernel function to be used.
    :type k: :class:`mulearn.kernel.Kernel`
    :raises: ValueError if optimization fails or if gurobi is not installed
    :returns: list -- optimal values for the independent variables of the
      problem.
    """
    if not gurobi_ok:
        raise ValueError('gurobi not available')

    m = len(xs)

    with Env(empty=True) as env:
        env.setParam('OutputFlag', 0)
        env.start()
        with Model('mulearn', env=env) as model:
            model.setParam('OutputFlag', 0)
            model.setParam('TimeLimit', self.time_limit)

            # One chi variable per training object; when c is finite its
            # bounds encode the membership-weighted box constraints.
            for i in range(m):
                if c < np.inf:
                    model.addVar(name=f'chi_{i}', lb=-c * (1 - mus[i]),
                                 ub=c * mus[i], vtype=GRB.CONTINUOUS)
                else:
                    model.addVar(name=f'chi_{i}', vtype=GRB.CONTINUOUS)
            model.update()
            chis = model.getVars()

            if self.initial_values is not None:
                # BUG FIX: the loop variables were named `c, i`, shadowing
                # both the trade-off parameter `c` and index naming used
                # elsewhere in this method.
                for chi, init in zip(chis, self.initial_values):
                    chi.start = init

            # Quadratic objective: chi' K chi - diag(K)' chi.
            obj = QuadExpr()
            for i, j in it.product(range(m), range(m)):
                obj.add(chis[i] * chis[j], k.compute(xs[i], xs[j]))
            for i in range(m):
                obj.add(-1 * chis[i] * k.compute(xs[i], xs[i]))
            if self.adjustment and self.adjustment != 'auto':
                # Explicit diagonal regularization requested by the caller.
                for i in range(m):
                    obj.add(self.adjustment * chis[i] * chis[i])
            model.setObjective(obj, GRB.MINIMIZE)

            # Equality constraint: the chis sum to one.
            constEqual = LinExpr()
            constEqual.add(sum(chis), 1.0)
            model.addConstr(constEqual, GRB.EQUAL, 1)

            try:
                model.optimize()
            except GurobiError as e:
                print(e.message)
                if self.adjustment == 'auto':
                    # Parse the suggested diagonal shift out of Gurobi's
                    # non-PSD error message and retry once with it.
                    s = e.message
                    a = float(s[s.find(' of ') + 4:s.find(' would')])
                    logger.warning('non-diagonal Gram matrix, '
                                   f'retrying with adjustment {a}')
                    for i in range(m):
                        obj.add(a * chis[i] * chis[i])
                    model.setObjective(obj, GRB.MINIMIZE)
                    model.optimize()
                else:
                    raise e

            if model.Status != GRB.OPTIMAL:
                raise ValueError('optimal solution not found!')

            return [ch.x for ch in chis]
def compute_optimal_allocation():
    """Solve a job-to-server assignment problem as a binary LP.

    Each job is assigned to exactly one server, each server handles at most
    two jobs, and the total assignment cost is minimized.

    :return: [total_cost, optimal_allocation, model] where
        ``optimal_allocation`` is a list of 1-indexed (job, server) pairs.
    """
    from gurobipy import GRB, Model, LinExpr

    import numpy
    cost = numpy.array([[17.8, 16.96, 13.56, 12.22, 15.88, 17.59],
                        [13.11, 7.14, 8.57, 8.67, 7.23, 14.49],
                        [12.62, 9.1, 8.97, 7.75, 16.71, 16.59],
                        [12.87, 7.14, 9.75, 13.87, 13.59, 12.37],
                        [17.92, 14.5, 14.91, 10.0, 13.67, 12.56],
                        [9.9, 15.7, 15.32, 16.8, 17.34, 18.21]])

    m = Model('matching')
    n_jobs = cost.shape[0]
    n_servers = cost.shape[1]

    # Decision variables: x[i][j] == 1 iff job i runs on server j.
    x = []
    for i in range(n_jobs):
        x_i = []
        for j in range(n_servers):
            x_i.append(m.addVar(vtype=GRB.BINARY, name='var_{}_{}'.format(i, j)))
        x.append(x_i)

    # Each job is assigned to exactly one server.
    for i in range(n_jobs):
        lhs = LinExpr()
        for j in range(n_servers):
            lhs.add(x[i][j], 1)
        m.addConstr(lhs, GRB.EQUAL, rhs=1)

    # Each server can do at most 2 jobs.
    for j in range(n_servers):
        lhs = LinExpr()
        for i in range(n_jobs):
            lhs.add(x[i][j], 1)
        m.addConstr(lhs, '<=', rhs=2)

    # Objective: minimize total assignment cost.
    objexpr = LinExpr()
    for i in range(n_jobs):
        for j in range(n_servers):
            objexpr.add(x[i][j], cost[i, j])
    m.setObjective(objexpr, GRB.MINIMIZE)

    m.update()
    m.write('matching.lp')
    m.optimize()

    total_cost = m.objVal
    optimal_allocation = []
    for i in range(n_jobs):
        for j in range(n_servers):
            # BUG FIX: solver values are floats; never compare with == 1.
            # A binary variable's value may come back as e.g. 0.9999999999.
            if x[i][j].x > 0.5:
                optimal_allocation.append((i + 1, j + 1))
    model = m

    return ([total_cost, optimal_allocation, model])
def polytopic_trajectory_to_set_of_polytopes(s, x0, T, list_of_polytopes, eps=0, method="bigM", timelimit=60):
    """Solve for a T-step polytopic trajectory ending inside one of the
    given polytopes.

    Uses the cached model for horizon ``T`` from ``s.library``, adds the
    volume-maximizing objective and terminal-set constraints, optimizes,
    and finally removes the constraints it added so the cached model can
    be reused.

    :param s: system object (modes, weights, cached models in ``s.library``)
    :param x0: initial state (column vector)
    :param T: horizon length
    :param list_of_polytopes: candidate terminal polytopes
    :param eps: allowed per-coordinate slack on the initial condition
    :param method: "bigM" or one of the convex-hull spellings
    :param timelimit: Gurobi time limit in seconds
    :return: (x, u, G, theta, z, flag, terminal_polytope) tuple; valuated
        arrays when a solution was found, raw variables with flag=False
        otherwise.
    """
    (model, x, u, G, theta, z) = s.library[T]
    print("Initial: The number of variables is %d and # constraints is %d"
          % (len(model.getVars()), len(model.getConstrs())))
    J_area = LinExpr()
    d_min = model.addVar(lb=0.0001, name="new var")
    beta = 10**2  # Weight of infinity norm
    model.update()
    # Randomly bias G[0] toward lower/upper triangular to diversify searches.
    coin = random()
    for row in range(s.n):
        for column in range(s.n):
            if coin < 0.1:
                if row < column:
                    model.addConstr(G[0][row, column] == 0, name="constrain")
            elif coin > 0.9:
                if row > column:
                    model.addConstr(G[0][row, column] == 0)
            if row == column:
                # Diagonal bounded below so the polytope keeps minimum width.
                model.addConstr(G[0][row, column] >= d_min / s.weight[row])
    J_area.add(-d_min * T * s.n * beta)
    for row in range(s.n):
        for t in range(T):
            J_area.add(-G[t][row, row] * s.weight[row])
    # Starting Point and initial condition
    i_start = find_mode(s, x0)
    for i in s.modes:
        model.addConstr(z[0, i] == int(i == i_start))
    x_delta = {}
    for row in range(s.n):
        x_delta[row] = model.addVar(lb=-eps / s.weight[row], ub=eps / s.weight[row])
    model.update()
    for row in range(s.n):
        model.addConstr(x[0][row, 0] == x0[row, 0] + x_delta[row])
    model.setParam('OutputFlag', True)
    model.setParam('TimeLimit', timelimit)
    print("number of constraints is", len(model.getConstrs()), len(model.getGenConstrs()))
    # Terminal Constraint
    if method == "bigM":
        z_pol = terminal_constraint_set_bigM(s, x[T], G[T], model, list_of_polytopes)
    elif method in ["convexhull", "Convex_hull", "convex_hull", "chull"]:
        z_pol = terminal_constraint_convex_hull(s, x[T], G[T], model, list_of_polytopes)
    else:
        # BUG FIX: was `raise(method, "...")`, which raises a tuple and is a
        # TypeError in Python 3.
        raise ValueError("%s was not recognized. Enter either 'big-M' or 'Convex_hull' as method" % method)
    model.setObjective(J_area)
    print("number of constraints is", len(model.getConstrs()), len(model.getGenConstrs()))
    model.optimize()
    if model.SolCount > 0:
        flag = True
        print("+" * 20, "Flag is True and Status is", model.Status)
        x_n = valuation(x)
        u_n = valuation(u)
        G_n = valuation(G)
        theta_n = valuation(theta)
        z_n = mode_sequence(s, z)
        # if abs(np.linalg.det(G_n[0]))<10**-15:
        #     flag=False
        state_end_list = [y for y in list_of_polytopes if abs(z_pol[y].X - 1) < 0.1]
        print(len(state_end_list))
        assert (len(state_end_list) == 1)
        state_end = state_end_list[0]
        final = (x_n, u_n, G_n, theta_n, z_n, flag, state_end)
    elif model.Status != 2 and model.Status != 11:
        flag = False
        print("-" * 20, "False flag", model.Status)
        final = (x, u, G, theta, z, flag, s.goal)
    else:
        # BUG FIX: this branch (status optimal/interrupted but no incumbent)
        # used to `pass`, leaving `final` unbound -> NameError at return.
        flag = False
        print("-" * 20, "False flag (no incumbent)", model.Status)
        final = (x, u, G, theta, z, flag, s.goal)
    print(model.getConstrByName("sadra%d" % T) in model.getConstrs(),
          model.getConstrByName("sadra%d" % T))
    # Strip everything added above so the cached model stays reusable.
    remove_new_constraints(s, model, T)
    print(model.getConstrByName("sadra%d" % T) in model.getConstrs(),
          model.getConstrByName("sadra%d" % T))
    return final
def __optmize_single_project(self, x, j):
    '''
    Given the generated x for single project, try to optimize the tardiness of the project.

    Builds a Gurobi model minimizing the completion time CT of project ``j``:
    activities cannot start before their resources are delivered (delivery
    date derived from the supplier assignment ``x``), must respect precedence
    edges, and activities sharing renewable (rk) resources are sequenced via
    big-M disjunctions. Writes the .lp/.sol files to ``self.output_dir``.

    :param x: the assignment of resource supplier to project
    :param j: index of project
    :return: optimal objective value (minimum completion time)
    '''
    m = Model("SingleProject_%d" % j)

    #### Create variables ####
    project = self.project_list[j]

    ## Project complete data,Project Tadeness,construction completion time
    CT = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(CT%d)" % j)

    ## Activity start time
    ST = {}
    project_activities = self.project_activity[project]
    for row in project_activities.nodes():
        ST[row] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="(ST%d,%s)" % (j, row))

    ## Review sequence z_ij
    ## move to annealing objective function

    # y: binary ordering variables, only for activity pairs that share at
    # least one rk resource (the only pairs that must be sequenced).
    y = {}
    for activity_i in project_activities.nodes():
        for activity_j in project_activities.nodes():
            if activity_i != activity_j and len(list(
                    set(project_activities.node[activity_i]['rk_resources']).intersection(
                        project_activities.node[activity_j]['rk_resources']))) > 0:
                y[activity_i, activity_j] = m.addVar(obj=0, vtype=GRB.BINARY,
                                                     name="(y%d,%s,%s)" % (j, activity_i, activity_j))
    m.update()

    #### Create constrains ####
    ## Constrain 2: project complete data>due data
    ## move to annealing objective function

    ## Constrain 3: supplier capacity limit
    ## move to annealing neighbor & random generator

    ## Constrain 4,6: project demand require; each project receive from one supplier for each resource
    ## move to annealing neighbor & random generator

    ## constrain 5: shipping constrain
    ## move to annealing neighbor & random generator

    ## Constrain 7:budget limit
    ## move to annealing constraint valid

    ## Constrain 8: activity starting constrain
    # Delivery day of each resource = release time at the chosen supplier plus
    # shipping time; x.get(...) is 1 only for the chosen supplier.
    for a in project_activities.nodes():
        for r in project_activities.node[a]['resources']:
            resource_delivered_days = 0
            for s in self.resource_supplier_list[r]:
                resource_delivered_days += x.get((r, s, project), 0) * \
                                           (self.resource_supplier_release_time[r, s] +
                                            self.supplier_project_shipping[r, s, project])
            m.addConstr(resource_delivered_days, GRB.LESS_EQUAL, ST[a],
                        name="constraint_8_project_%d_activity_%s_resource_%s" % (j, a, r))

    ## Constrain 9 activity sequence constrain
    for row1, row2 in project_activities.edges():
        m.addConstr(ST[row1] + project_activities.node[row1]['duration'], GRB.LESS_EQUAL,
                    ST[row2],
                    name="constraint_9_project_%d_activity_%s_activity_%s" % (j, row1, row2))

    ## Constrain 10,11: big-M disjunction preventing overlap of activities
    ## competing for the same rk resource; y picks the order.
    for row1 in project_activities.nodes():
        for row2 in project_activities.nodes():
            if row1 != row2 and len(list(
                    set(project_activities.node[row1]['rk_resources']).intersection(
                        project_activities.node[row2]['rk_resources']))) > 0:
                m.addConstr(ST[row1] + project_activities.node[row1]['duration'] - self.M * (
                        1 - y[row1, row2]), GRB.LESS_EQUAL, ST[row2],
                            name="constraint_10_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                m.addConstr(
                    ST[row2] + project_activities.node[row2]['duration'] - self.M * (y[row1, row2]),
                    GRB.LESS_EQUAL, ST[row1],
                    name="constraint_11_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                # m.addConstr(y[j,row1,row2]+y[j,row2,row1],GRB.LESS_EQUAL,1)

    ## Constrain 12: CT is an upper bound on every activity finish time
    for row in project_activities.nodes():
        m.addConstr(CT, GRB.GREATER_EQUAL, ST[row] + project_activities.node[row]['duration'],
                    name="constraint_12_project_%d_activity_%s" % (j, row))

    ## Constrain 13
    ## move to anealing objective function

    ## Constrain 14
    ## move to anealing objective function

    ## Constrain 15
    ## move to anealing objective function

    ## Constrain 16
    ## move to anealing objective function

    ## Constrain 17
    ## move to anealing objective function

    m.update()

    # Set optimization objective - minimize completion time
    expr = LinExpr()
    expr.add(CT)
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    m.params.presolve = 1
    m.update()

    # Solve
    # m.params.presolve=0
    m.optimize()
    m.write(join(self.output_dir, "heuristic_%d.lp" % j))
    m.write(join(self.output_dir, "heuristic_%d.sol" % j))
    return m.objVal
def polytope_trajectory(s, x0, state_end, T, alpha_start, eps=0.1, coin=None):
    """Solve for a T-step polytopic trajectory from ``x0`` to ``state_end``.

    Uses the cached horizon-T model from ``s.library``, adds a
    volume-maximizing objective plus terminal/initial constraints,
    optimizes, then removes the added constraints so the cached model can
    be reused.

    :param s: system object (modes, weights, cached models in ``s.library``)
    :param x0: initial state (column vector)
    :param state_end: terminal target polytope/state
    :param T: horizon length
    :param alpha_start: -1 to allow eps-slack on the initial condition
    :param eps: per-coordinate slack used when alpha_start == -1
    :param coin: random draw in [0, 1) steering the triangularity bias of
        G[0]; drawn fresh per call when None.
    :return: (x, u, G, theta, z, flag) tuple; valuated when solved.
    """
    # BUG FIX: the signature used `coin=random()`, which is evaluated ONCE at
    # function definition time, so every default call shared the same "coin".
    if coin is None:
        coin = random()
    (model, x, u, G, theta, z) = s.library[T]
    n_vars = len(model.getVars())
    n_constraints = len(model.getConstrs())
    new_var_count = 0
    new_constraint_count = 0
    J_area = LinExpr()
    d_min = model.addVar(lb=0.0001, name="new var %d" % new_var_count)
    new_var_count += 1
    beta = 10**2  # Weight of infinity norm
    model.update()
    print("coin=", coin)
    # Randomly bias G[0] toward lower/upper triangular to diversify searches.
    for row in range(s.n):
        for column in range(s.n):
            if coin < 0.1:
                if row < column:
                    model.addConstr(G[0][row, column] == 0,
                                    name="constraint %d" % new_constraint_count)
                    new_constraint_count += 1
            elif coin > 0.9:
                if row > column:
                    model.addConstr(G[0][row, column] == 0,
                                    name="constraint %d" % new_constraint_count)
                    new_constraint_count += 1
            if row == column:
                # Diagonal bounded below so the polytope keeps minimum width.
                model.addConstr(G[0][row, column] >= d_min / s.weight[row],
                                name="constraint %d" % new_constraint_count)
                new_constraint_count += 1
    J_area.add(-d_min * T * s.n * beta)
    for row in range(s.n):
        for t in range(T):
            J_area.add(-G[t][row, row] * s.weight[row])
    # Terminal Constraint
    terminal_constraint(s, x, G, T, model, state_end)
    # Starting Point
    i_start = find_mode(s, x0)
    for i in s.modes:
        model.addConstr(z[0, i] == int(i == i_start))
    # model.setParam('OutputFlag',False)
    if alpha_start == -1:
        # Allow the initial state to float within an eps-box around x0.
        x_delta = {}
        for row in range(s.n):
            x_delta[row] = model.addVar(lb=-eps / s.weight[row], ub=eps / s.weight[row])
        model.update()
        for row in range(s.n):
            model.addConstr(x[0][row, 0] == x0[row, 0] + x_delta[row])
        model.setObjective(J_area)
        model.optimize()
    else:
        model.setObjective(J_area)
        model.optimize()
    if model.Status != 2 and model.Status != 11:
        flag = False
        print("*" * 20, "False flag", model.Status)
        final = (x, u, G, theta, z, flag)
    else:
        flag = True
        x_n = valuation(x)
        u_n = valuation(u)
        G_n = valuation(G)
        theta_n = valuation(theta)
        z_n = mode_sequence(s, z)
        # if abs(np.linalg.det(G_n[0]))<10**-15:
        #     flag=False
        final = (x_n, u_n, G_n, theta_n, z_n, flag)
    print("starting removal process")
    print("start=", n_vars, "variables and ", n_constraints, " constraints")
    new_n_vars = len(model.getVars())
    new_n_constraints = len(model.getConstrs())
    print("new:", new_n_vars, "variables and ", new_n_constraints, " constraints")
    time_start = time()
    # Strip everything added above so the cached model stays reusable.
    remove_new_constraints(s, model, T)
    print("end of removal in ", time() - time_start, " seconds")
    return final
def build(self, cluster, minnum=None, maxnum=None, minsize=None, expcov=None, maxcopy=None, min_cov=None, cov_weight=1000, log_path=None):
    """Build the gene-copy assignment model for a read cluster.

    Expands each candidate into ``maxcopy`` copies, creates per-copy
    existence (delta) variables and per-read assignment variables, and
    accumulates the coverage / variant-count error terms that make up the
    objective.

    :param cluster: read cluster providing ``candidates`` and iterable reads
    :param minnum/maxnum: bounds on how many copies may exist
    :param minsize: minimum number of reads per open copy
    :param expcov: expected coverage per variant per copy
    :param maxcopy: copies created per candidate
    :param min_cov: if set, gate variant-count error through a P variable
    :param cov_weight: weight multiplying the coverage error term
    :param log_path: unused here; kept for interface compatibility
    """
    log.debug(str(self))
    if not minnum:
        minnum = self.minnum
    if not maxnum:
        # NOTE(review): falls back to self.minnum, which pins maxnum == minnum
        # when both default — looks like it should be self.maxnum; confirm.
        maxnum = self.minnum
    if not minsize:
        minsize = self.minsize
    if not expcov:
        expcov = self.expcov
    if not maxcopy:
        maxcopy = self.maxcopy
    if not cov_weight:
        cov_weight = self.cov_weight

    # Expand each candidate into maxcopy independent copies.
    candidates = {}
    for c in cluster.candidates:
        for copy_num in range(maxcopy):
            copy = SeqRecord('{}_{}'.format(c.id, copy_num), '')
            copy.variants = c.variants
            candidates[copy.id] = copy

    ## build structures to access data
    all_variants = set()
    for c in candidates.values():
        all_variants = all_variants.union(set(c.variants))
    candidate_variants = all_variants
    for r in cluster:
        all_variants = all_variants.union(set(r.variants))
    var_to_reads = dict([(v, {}) for v in all_variants])
    for r in cluster:
        for v in r.variants:
            try:
                var_to_reads[v][r.id] = r
            except KeyError:
                var_to_reads[v] = {r.id: r}

    ## Build delta (copy exists) variables
    copy_number = {}
    for c in candidates.values():
        # BUG FIX: the name used the stale loop leftover `r.id` and collided
        # with the assignment-variable names below; name deltas distinctly.
        delta_var = self.addVar(vtype=gurobipy.GRB.BINARY,
                                name='Delta|{}'.format(c.id))
        copy_number[c.id] = delta_var
        c.delta = delta_var

    ## Constrain the total number of genes
    self.addConstr(
        self.quicksum([c.delta for c in candidates.values()]) >= minnum)
    self.addConstr(
        self.quicksum([c.delta for c in candidates.values()]) <= maxnum)

    ## Build D[cand][read] assignment variables
    assignment = {}
    for r in cluster:
        assignment[r.id] = {}
        for c in candidates.values():
            d_var = self.addVar(vtype=gurobipy.GRB.BINARY,
                                name='D|{}|{}'.format(r.id, c.id))
            # NOTE(review): candidate copies are named '<id>_<n>', so this
            # equality only matches if r.candidate already carries the suffix;
            # the comment suggests copy 0 was intended — confirm.
            if (r.candidate) == c.id:  ## start with assignment of r to copy 0 of its prior candidate
                d_var.Start = 1
            else:
                d_var.Start = 0
            assignment[r.id][c.id] = d_var
            try:
                c.assignment[r.id] = d_var
            except AttributeError:
                c.assignment = {r.id: d_var}
            # A read can only be assigned to a copy that exists.
            self.addConstr(c.delta >= d_var)
        r.assignment = assignment[r.id]
        # Every read is assigned to exactly one copy.
        self.addConstr(self.quicksum(r.assignment.values()), gurobipy.GRB.EQUAL, 1)

    ## Constrain minimum size
    if minsize:
        for c in candidates.values():
            self.addConstr(
                self.quicksum(c.assignment.values()) * c.delta >= minsize * c.delta)

    ## build overall error
    var_cov_save = {}
    var_cov_missing = {}
    var_cov_error = LinExpr()
    var_count_save = {}
    var_count_error = LinExpr()
    counter = 1
    # COMPAT FIX: .items() instead of py2-only .iteritems().
    for var, reads in var_to_reads.items():
        if counter % 100 == 0:
            log.debug('Finished adding {} of {} variants to model'.format(
                counter, len(var_to_reads)))
        counter += 1
        for c in candidates.values():
            # Number of reads carrying this variant assigned to copy c.
            sum_reads = LinExpr()
            for d in [r.assignment[c.id] for r in reads.values()]:
                sum_reads.add(d)
            ## Variant coverage c, var building
            if var in c.variants:
                var_cov_expr = LinExpr()
                # BUG FIX: names below used the stale loop leftover `v`
                # instead of the current variant `var`.
                var_cov_abs = self.addVar(vtype=gurobipy.GRB.INTEGER,
                                          name='abs_var_cov|{}|{}'.format(c.id, var))
                var_cov_data = VarWrapper(var_cov_abs)
                ## save var_cov_abs
                try:
                    var_cov_save[c.id][var] = var_cov_data
                except KeyError:
                    var_cov_save[c.id] = {var: var_cov_data}
                c.var_cov = var_cov_save[c.id]
                var_cov_data.count = sum_reads
                var_cov_expr.add(sum_reads)
                var_cov_expr.add(-(c.delta * expcov))
                # CONSISTENCY FIX: gurobipy.GRB.INFINITY (bare GRB elsewhere
                # in this method is only used via the gurobipy namespace).
                var_cov = self.addVar(vtype=gurobipy.GRB.INTEGER,
                                      lb=-gurobipy.GRB.INFINITY,
                                      name='var_cov|{}|{}'.format(c.id, var))
                self.addConstr((var_cov == var_cov_expr))
                self.addGenConstrAbs(var_cov_abs, var_cov)
                ## add to error term 1:
                # var_cov_error.add(var_cov_abs/float(len(c.variants)))
                var_cov_error.add(var_cov_abs)
            elif var in candidate_variants.difference(c.variants):
                # Reads with a variant the copy does not carry are pure error.
                try:
                    var_cov_missing[c.id][var] = sum_reads
                except KeyError:
                    var_cov_missing[c.id] = {var: sum_reads}
                c.var_cov_missing = var_cov_missing[c.id]
                var_cov_error.add(sum_reads)
            ## variant count error
            var_count_abs = self.addVar(vtype=gurobipy.GRB.INTEGER,
                                        name='abs_var_count|{}|{}'.format(c.id, var))
            var_count_data = VarWrapper(var_count_abs)
            var_count_expr = LinExpr()
            try:
                var_count_save[c.id][var] = var_count_data
            except KeyError:
                var_count_save[c.id] = {var: var_count_data}
            c.var_count = var_count_save[c.id]
            var_count_data.count = sum_reads
            var_count_expr.add(sum_reads)
            var_count_expr.add(-(c.delta * expcov))
            var_count = self.addVar(vtype=gurobipy.GRB.INTEGER,
                                    lb=-gurobipy.GRB.INFINITY,
                                    name='var_count|{}|{}'.format(c.id, var))
            self.addConstr((var_count == var_count_expr))
            self.addGenConstrAbs(var_count_abs, var_count)
            ## make P variable
            p = None
            if min_cov:
                p = self.add_p_var(var_count_abs, sum_reads, min_cov)
                var_count_final = self.addVar(vtype=gurobipy.GRB.INTEGER)
                self.addConstr(var_count_final == var_count_abs * p)
            else:
                var_count_final = var_count_abs
            var_count_data.total = var_count_final
            var_count_expr = LinExpr()
            var_count_data.p_var = p
            ## add to error term 2
            var_count_error.add(var_count_final)

    var_cov_error = var_cov_error * cov_weight
    self.objective = LinExpr(var_cov_error + var_count_error)
    self.var_count_error = var_count_error
    self.var_cov_error = var_cov_error
    self.candidates = candidates
    self.reads = cluster.reads
    self.var_to_reads = var_to_reads
def _sensitivity_for_constraints(AT, j, project, y_, project_activity, M):
    """Re-solve project ``j``'s completion-time LP with the sequencing
    variables fixed to ``y_`` and collect shadow prices of the resource
    arrival constraints (constraint 8).

    Records one row per (round, project, resource, activity, Pi) in the
    module-level ``_pa_dataset``, and the per-resource maximum Pi in
    ``_pa_max_dataset``.

    :param AT: mapping (project index, resource) -> arrival time
    :param j: project index
    :param project: project identifier (key into ``project_activity``)
    :param y_: fixed sequencing decisions, read via _get_y_for_activities
    :param project_activity: mapping project -> activity graph
    :param M: big-M constant for the disjunctive constraints
    :return: dict mapping (j, resource) -> max shadow price over activities
    """
    # _pa_max_dataset is mutated in place via .loc, so it needs no global.
    global _round, _pa_dataset
    m = Model("SingleProject_%d_for_sensitivity" % j)
    m.setParam('OutputFlag', False)
    # m.params.IntFeasTol = 1e-7

    ## Project complete data,Project Tadeness,construction completion time
    CT = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="CT_%d" % j)

    ## Activity start time
    ST = {}
    project_activities = project_activity[project]
    for row in project_activities.nodes():
        ST[row] = m.addVar(obj=0, vtype=GRB.CONTINUOUS, name="ST_%d_%s" % (j, row))

    ## Review sequence z_ij
    ## move to annealing objective function
    m.update()

    ## Constrain 8: activity starting constrain
    # equation 20 -- activities wait for their resources to arrive
    for a in project_activities.nodes():
        for r in project_activities.node[a]['resources']:
            m.addConstr(ST[a], GRB.GREATER_EQUAL, AT[j, r],
                        name="constraint_8_project_%d_activity_%s_resource_%s" % (j, a, r))

    ## Constrain 9 activity sequence constrain
    # equation 21 -- precedence along graph edges
    for row1, row2 in project_activities.edges():
        m.addConstr(ST[row1] + project_activities.node[row1]['duration'], GRB.LESS_EQUAL,
                    ST[row2],
                    name="constraint_9_project_%d_activity_%s_activity_%s" % (j, row1, row2))

    ## Constrain 10,11 -- big-M sequencing with y fixed from the caller,
    ## which makes the model a pure LP so duals (Pi) exist.
    for row1 in project_activities.nodes():
        for row2 in project_activities.nodes():
            if row1 != row2 and len(list(
                    set(project_activities.node[row1]['rk_resources']).intersection(
                        project_activities.node[row2]['rk_resources']))) > 0:
                # equation 22
                m.addConstr(ST[row1] + project_activities.node[row1]['duration'] - M * (
                        1 - _get_y_for_activities(y_, row1, row2)), GRB.LESS_EQUAL, ST[row2],
                            name="constraint_10_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                # equation 23
                m.addConstr(
                    ST[row2] + project_activities.node[row2]['duration'] - M * _get_y_for_activities(y_, row1, row2),
                    GRB.LESS_EQUAL, ST[row1],
                    name="constraint_11_project_%d_activity_%s_activity_%s" % (j, row1, row2))
                # m.addConstr(y[j,row1,row2]+y[j,row2,row1],GRB.LESS_EQUAL,1)

    ## Constrain 12
    # equation 24 -- CT dominates every activity finish time
    for row in project_activities.nodes():
        m.addConstr(CT, GRB.GREATER_EQUAL, ST[row] + project_activities.node[row]['duration'],
                    name="constraint_12_project_%d_activity_%s" % (j, row))
    m.update()

    # Set optimization objective - minimize completion time
    expr = LinExpr()
    expr.add(CT)
    m.setObjective(expr, GRB.MINIMIZE)
    m.update()
    ##########################################
    # m.params.presolve = 1
    m.update()
    # Primal simplex keeps the dual values (Pi) deterministic.
    m.setParam(GRB.Param.Method, 0)
    m.update()
    # Solve
    # m.params.presolve=0
    m.optimize()

    # Harvest duals of constraint 8. Names have the fixed shape
    # constraint_8_project_<j>_activity_<a>_resource_<r>, so after split('_')
    # splits[5] is the activity and splits[7] the resource (assumes neither
    # contains '_').
    _skja = {}
    for c in m.getConstrs():
        if c.ConstrName.startswith('constraint_8_project'):
            splits = c.ConstrName.split('_')
            r = splits[7]
            if r not in _skja:
                _skja[r] = []
            _skja[r].append(c.Pi)
            # if c.Pi != 0:
            #     logging.debug('project %d binding resource:%s Pi:%.4g' % (j, splits[-1], c.Pi))
            # else:
            #     logging.debug('project %d not binding resource:%s Pi:%.4g' % (j, splits[-1], c.Pi))
            _pa_dataset.loc[_pa_dataset.shape[0]] = [_round, j, r, splits[5], c.Pi]
    _skj = {}
    for r in _skja:
        _skj[j, r] = max(_skja[r])
        _pa_max_dataset.loc[_pa_max_dataset.shape[0]] = [_round, j, r, max(_skja[r])]
    return _skj