Example #1
    def K_dominance_check(self, _V_best_d, Q_d):
        """
        :param _V_best_d: a list of d-dimension
        :param Q_d: a list of d-dimension
        :return: True if _V_best_d is preferred to Q_d with respect to self.Lambda_inequalities, using K-dominance;
         otherwise it returns False
        """
        _d = len(_V_best_d)

        prob = LpProblem("Ldominance", LpMinimize)
        lambda_variables = LpVariable.dicts("l", range(_d), 0)

        for inequ in self.Lambda_ineqalities:
            prob += lpSum([inequ[j + 1] * lambda_variables[j] for j in range(0, _d)]) + inequ[0] >= 0

        prob += lpSum([lambda_variables[i] * (_V_best_d[i]-Q_d[i]) for i in range(_d)])

        #prob.writeLP("show-Ldominance.lp")

        status = prob.solve()
        result = value(prob.objective)

        if result < 0:
            return False

        return True
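A minimal usage sketch (illustrative data; the method is shown as if it were a module-level function, and the host object only carries the Lambda polytope constraints):

from pulp import LpProblem, LpMinimize, LpVariable, lpSum, value

class _Host(object):
    # each row encodes inequ[0] + sum_j inequ[j + 1] * lambda_j >= 0,
    # here the box 0 <= lambda_0, lambda_1 <= 1
    Lambda_ineqalities = [[0, 1, 0], [1, -1, 0], [0, 0, 1], [1, 0, -1]]

host = _Host()
# True: _V_best_d - Q_d is nonnegative, so no lambda in the box favors Q_d
print(K_dominance_check(host, [8.5, 3.4], [7.9, 3.0]))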
Example #2
def solve_jiang_guan_lp(return_samples, targets, asset_partition):
    n = return_samples.shape[1]
    prob = pulp.LpProblem('Jiang Guan DCCP Sample Approximation', pulp.LpMinimize)
    x = [pulp.LpVariable('Asset {0:d} Weight'.format(i), 0.0, 1.0) for i in range(n)]
    mu = calc_first_moment(return_samples)
    prob += pulp.lpDot(mu, x), 'Sample Mean Return'

    i = 0
    for sample in return_samples:
        prob += (pulp.lpDot(sample, x) >= targets[0],
                 'Global return target for sample {0:d}'.format(i))
        i += 1

    i = 0
    j = 1
    for assets in asset_partition:
        for sample in return_samples:
            # restrict the sum to the assets in this segment of the partition
            prob += (pulp.lpSum([sample[k] * x[k] for k in assets]) >= targets[j],
                     'Return target for segment {0:d} for sample {1:d}'.format(j, i))
            i += 1
        j += 1

    prob += (pulp.lpSum(x) == 1.0, 'Fully invested portfolio requirement')

    prob.writeLP('JiangGuanDccp.lp')
    prob.solve()
    status = pulp.LpStatus[prob.status]
    print 'Status: {0:s}'.format(status)
    return np.array([v.varValue for v in prob.variables()]), status
Example #3
def solve(g):
    el = g.get_edge_list()
    nl = g.get_node_list()
    p = LpProblem('min_cost', LpMinimize)
    capacity = {}
    cost = {}
    demand = {}
    x = {}
    for e in el:
        capacity[e] = g.get_edge_attr(e[0], e[1], 'capacity')
        cost[e] = g.get_edge_attr(e[0], e[1], 'cost')
    for i in nl:
        demand[i] = g.get_node_attr(i, 'demand')
    for e in el:
        x[e] = LpVariable("x"+str(e), 0, capacity[e])
    # add obj
    objective = lpSum(cost[e] * x[e] for e in el)
    p += objective
    # add constraints
    for i in nl:
        out_neig = g.get_out_neighbors(i)
        in_neig = g.get_in_neighbors(i)
        p += lpSum(x[(i,j)] for j in out_neig) -\
             lpSum(x[(j,i)] for j in in_neig)==demand[i]
    p.solve()
    return x, value(objective)
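The solver expects a directed-graph object in the style of coinor.gimpy; a stub implementing just the methods solve() touches is enough to exercise it (all names and data here are illustrative):

class StubGraph(object):
    def __init__(self):
        self.edges = {}   # (i, j) -> {'capacity': ..., 'cost': ...}
        self.nodes = {}   # i -> demand (positive = net outflow)
    def add_edge(self, i, j, capacity, cost):
        self.edges[(i, j)] = {'capacity': capacity, 'cost': cost}
    def get_edge_list(self): return list(self.edges)
    def get_node_list(self): return list(self.nodes)
    def get_edge_attr(self, i, j, name): return self.edges[(i, j)][name]
    def get_node_attr(self, i, name): return self.nodes[i]
    def get_out_neighbors(self, i): return [j for (a, j) in self.edges if a == i]
    def get_in_neighbors(self, i): return [a for (a, j) in self.edges if j == i]

g = StubGraph()
g.nodes = {'s': 2, 't': -2}          # ship 2 units from s to t
g.add_edge('s', 't', capacity=2, cost=1)
x, obj = solve(g)                    # obj == 2.0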
Example #4
    def _GetTotalEnergyProblem(self, min_driving_force=0, objective=pulp.LpMinimize):
        
        # Define and apply the constraints on the concentrations
        ln_conc_lb, ln_conc_ub = self._MakeLnConcentratonBounds()

        # Create the driving force variable and add the relevant constraints
        A, b, _c = self._MakeDrivingForceConstraints(ln_conc_lb, ln_conc_ub)
       
        lp = pulp.LpProblem("OBD", objective)
        
        # ln-concentration variables
        l = pulp.LpVariable.dicts("l", ["%d" % i for i in xrange(self.Nc)])
        x = [l["%d" % i] for i in xrange(self.Nc)] + [min_driving_force]
        
        total_g = pulp.LpVariable("g_tot")
        
        for j in xrange(A.shape[0]):
            row = [A[j, i] * x[i] for i in xrange(A.shape[1])]
            lp += (pulp.lpSum(row) <= b[j, 0]), "energy_%02d" % j
        
        total_g0 = float(self.dG0_r_prime * self.fluxes.T)
        total_reaction = self.S * self.fluxes.T
        row = [total_reaction[i, 0] * x[i] for i in xrange(self.Nc)]
        lp += (total_g == total_g0 + pulp.lpSum(row)), "Total G"

        lp.setObjective(total_g)
        
        #lp.writeLP("../res/total_g.lp")
        
        return lp, total_g
Example #5
        def setup():
            # ... declare variables
            x = ilp.LpVariable.dicts('x', nodes_vars.keys(), 0, 1, ilp.LpBinary)
            y = ilp.LpVariable.dicts('y',
                                     [(i, j) for i, j in edges] + [(j, i) for i, j in edges],
                                     0, 1, ilp.LpBinary)
            limits = defaultdict(int)
            for i, j in edges:
                limits[i] += 1
                limits[j] += 1

            # ... define the problem
            prob = ilp.LpProblem("Factorizer", ilp.LpMinimize)

            # ... define the constraints
            for i in nodes_vars:
                prob += ilp.lpSum(y[(i, j)] for j in nodes_vars if (i, j) in y) <= limits[i]*x[i]

            for i, j in edges:
                prob += y[(i, j)] + y[(j, i)] == 1

            # ... define the objective function (min number of factorizations)
            prob += ilp.lpSum(x[i] for i in nodes_vars)

            return x, prob
Example #6
File: solveByLP.py  Project: shinsyzgz/429A
def opt(C, X):
    orderNum = len(X)
    routeNum = len(C)
    routeIdx = range(routeNum)
    orderIdx = range(orderNum)
    # print routeIdx,orderIdx
    eps = 1.0 / 10 ** 7
    print eps
    var_choice = lp.LpVariable.dicts('route', routeIdx, cat='Binary')
    # var_choice=lp.LpVariable.dicts('route',routeIdx,lowBound=0)  # try relaxing the 0/1 variables
    exceed_labor = lp.LpVariable('Number of routes exceed 1000', 0)
    prob = lp.LpProblem("lastMile", lp.LpMinimize)

    prob += exceed_labor * 100000 + lp.lpSum(var_choice[i] * C[i] for i in routeIdx)

    prob += lp.lpSum(var_choice[i] for i in routeIdx) <= 1000 + exceed_labor + eps
    for i in orderIdx:
        prob += lp.lpSum(var_choice[j] for j in X[i]) >= (1 - eps)

    prob.solve(lp.CPLEX(msg=0))
    print "\n\nstatus:", lp.LpStatus[prob.status]
    if lp.LpStatus[prob.status] != 'Infeasible':
        obj = lp.value(prob.objective)
        print "\n\nobjective:", obj
        sol_list = [var_choice[i].varValue for i in routeIdx]
        print "\n\nroutes:", (sum(sol_list))
        # print "\n\noriginal problem:\n",prob
        return obj, sol_list, lp.LpStatus[prob.status]
    else:
        return None, None, lp.LpStatus[prob.status]
Example #7
    def _create_lp(self):
        ''' See base class.
        '''
        self.model._create_lp()
        self.lp_model_max_slack = self.model.lp_model.deepcopy()

        input_slack_vars = pulp.LpVariable.dicts(
            'input_slack', self.strongly_disposal_input_categories,
            0, None, pulp.LpContinuous)
        output_slack_vars = pulp.LpVariable.dicts(
            'output_slack', self.strongly_disposal_output_categories,
            0, None, pulp.LpContinuous)

        # change objective function
        self.lp_model_max_slack.sense = pulp.LpMaximize
        self.lp_model_max_slack.objective = (
            pulp.lpSum(list(input_slack_vars.values())) +
            pulp.lpSum(list(output_slack_vars.values())))

        # change constraints
        for input_category in self.strongly_disposal_input_categories:
            name = self.model._constraints[input_category]
            self.lp_model_max_slack.constraints[name].addterm(
                input_slack_vars[input_category], 1)
            self.lp_model_max_slack.constraints[name].sense = pulp.LpConstraintEQ

        for output_category in self.strongly_disposal_output_categories:
            name = self.model._constraints[output_category]
            self.lp_model_max_slack.constraints[name].addterm(
                output_slack_vars[output_category], -1)
            self.lp_model_max_slack.constraints[name].sense = pulp.LpConstraintEQ
Example #8

    def _MakeMDFProblemDual(self):
        """Create a PuLP problem for finding the Maximal Thermodynamic
        Driving Force (MDF), in its dual form.

        Returns:
            the linear problem object, its objective, and the four types of
            variables (w, g, z, u) as arrays
        """
        A, b, c, w, g, z, u = self._GetDualVariablesAndConstants()
        x = w + g + z + u
        lp = pulp.LpProblem("MDF_DUAL", pulp.LpMinimize)

        cnstr_names = ["y_%02d" % j for j in xrange(self.Nr)] + \
                      ["l_%02d" % j for j in xrange(self.Nc)] + \
                      ["MDF"]
        
        for i in xrange(A.shape[1]):
            row = [A[j, i] * x[j] for j in xrange(A.shape[0])]
            lp += (pulp.lpSum(row) == c[i, 0]), cnstr_names[i]

        objective = pulp.lpSum([b[i] * x[i] for i in xrange(A.shape[0])])
        lp.setObjective(objective)
        
        lp.writeLP("res/mdf_dual.lp")
        
        return lp, objective, w, g, z, u
Example #9
 def _MakeMDFProblem(self):
     """Create a PuLP problem for finding the Maximal Thermodynamic
     Driving Force (MDF).
    
     Returns:
         A tuple (problem_object, ln-concentration variables).
     """
     # Create the driving force variable and add the relevant constraints
     A, b, c = self._MakeDrivingForceConstraints()
    
     lp = pulp.LpProblem("MDF", pulp.LpMaximize)
     
     # ln-concentration variables
     _l = pulp.LpVariable.dicts("lnC", ["%d" % i for i in xrange(self.Nc)])
     B = pulp.LpVariable("B")
     lnC = [_l["%d" % i] for i in xrange(self.Nc)] + [B]
     
     for j in xrange(A.shape[0]):
         row = [A[j, i] * lnC[i] for i in xrange(A.shape[1])]
         lp += (pulp.lpSum(row) <= b[j, 0]), "energy_%02d" % j
     
     objective = pulp.lpSum([c[i] * lnC[i] for i in xrange(A.shape[1])])
     lp.setObjective(objective)
     
     #lp.writeLP("res/MDF_primal.lp")
     
     return lp, lnC
Example #10
    def fit(self, x, y):
        classifiers = []
        classifier_weights = []
        clf = self.base_estimator


        # get predictions of one classifier
        clf.fit(x, y)
        y_predict = clf.predict(x)
        u = (y_predict == y).astype(int)
        u[u == 0] = -1
        for index, value in enumerate(u):
            if value == -1:
                print index


        # solving linear programming
        d = pp.LpVariable.dicts("d", range(len(y)), 0, 1)
        prob = pp.LpProblem("LPboost", pp.LpMinimize)
        prob += pp.lpSum(d) == 1  # constraint for sum of weights to be 1
        # objective function
        objective_vector = []
        for index in range(len(y)):
            objective_vector.append(d[index] * u[index])
        prob += pp.lpSum(objective_vector)


        print pp.LpStatus[prob.solve()]
        for v in prob.variables():
            if v.varValue > 0:
                print v.name + "=" + str(v.varValue)
Example #11
    def K_dominnace_check_2(self, u_d, v_d, _inequalities):
        """

        :param u_d: a d-dimensional vector (list), e.g. [8.53149891, 3.36436796]
        :param v_d: a list of the same form as u_d
        :param _inequalities: list of constraints on the d-dimensional Lambda polytope, e.g.
         [[0, 1, 0], [1, -1, 0], [0, 0, 1], [1, 0, -1], [0.0, 1.4770889, -3.1250839]]
        :return: True if u_d K-dominates v_d with respect to the given _inequalities, otherwise False
        """
        _d = len(u_d)

        prob = LpProblem("Kdominance", LpMinimize)
        lambda_variables = LpVariable.dicts("l", range(_d), 0)

        for inequ in _inequalities:
            prob += lpSum([inequ[j + 1] * lambda_variables[j] for j in range(0, _d)]) + inequ[0] >= 0

        prob += lpSum([lambda_variables[i] * (u_d[i]-v_d[i]) for i in range(_d)])

        #prob.writeLP("show-Ldominance.lp")

        status = prob.solve()

        result = value(prob.objective)
        if result < 0:
            return False

        return True
Example #12
def good_annotation_locations(item, annotate_first=True):
    """Find the minimum number of annotations necessary to extract all the fields

    Since annotations can be reviewed and modified later by the user we want to keep
    just the minimum number of them.

    Parameters
    ----------
    item : Item
    annotate_first : bool
        If True, always annotate the first instance of the item in the page

    Returns
    -------
    List[ItemLocation]
    """
    #    x[i] = 1 iff i-th item is representative
    # A[i, j] = 1 iff i-th item contains the j-th field
    #
    # Solve:
    #           min np.sum(x)
    # Subject to:
    #           np.all(np.dot(A.T, x) >= np.repeat(1, len(fields)))
    index_locations = {location: i for i, location in enumerate(item.locations)}
    P = pulp.LpProblem("good_annotation_locations", pulp.LpMinimize)
    X = [pulp.LpVariable("x{0}".format(i), cat="Binary") for i in range(len(index_locations))]
    if annotate_first:
        P += X[0] == 1
    P += pulp.lpSum(X)
    for field in item.fields:
        P += pulp.lpSum([X[index_locations[location.item]] for location in field.locations]) >= 1
    P.solve()
    return [i for (i, x) in enumerate(X) if x.value() == 1]
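The comment block above is a plain set-cover formulation; the same pattern in isolation, with a made-up incidence matrix:

import pulp

# A[i][j] == 1 iff location i contains field j
A = [[1, 1, 0],
     [0, 1, 1],
     [1, 0, 0]]
P = pulp.LpProblem("set_cover", pulp.LpMinimize)
X = [pulp.LpVariable("x%d" % i, cat="Binary") for i in range(len(A))]
P += pulp.lpSum(X)                                   # minimize locations used
for j in range(3):                                   # every field covered at least once
    P += pulp.lpSum(X[i] * A[i][j] for i in range(len(A))) >= 1
P.solve()
print([i for i, x in enumerate(X) if x.value() == 1])  # e.g. [0, 1]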
Example #13
 def _MakeMDFProblem(self):
     """Create a CVXOPT problem for finding the Maximal Thermodynamic
     Driving Force (MDF).
    
     Does not set the objective function... leaves that to the caller.
    
     Returns:
         the linear problem object, and the three types of variables as arrays
     """
     A, b, c, y, l = self._GetPrimalVariablesAndConstants()
     B = pulp.LpVariable("mdf")
     x = y + l + [B]
     lp = pulp.LpProblem("MDF_PRIMAL", pulp.LpMaximize)
     
     cnstr_names = ["driving_force_%02d" % j for j in xrange(self.Nr_active)] + \
                   ["covariance_var_ub_%02d" % j for j in xrange(self.Nr)] + \
                   ["covariance_var_lb_%02d" % j for j in xrange(self.Nr)] + \
                   ["log_conc_ub_%02d" % j for j in xrange(self.Nc)] + \
                   ["log_conc_lb_%02d" % j for j in xrange(self.Nc)]
       
     for j in xrange(A.shape[0]):
         row = [A[j, i] * x[i] for i in xrange(A.shape[1])]
         lp += (pulp.lpSum(row) <= b[j, 0]), cnstr_names[j]
     
     objective = pulp.lpSum([c[i] * x[i] for i in xrange(A.shape[1])])
     lp.setObjective(objective)
     
     lp.writeLP("res/mdf_primal.lp")
     
     return lp, objective, y, l, B
Example #14
File: LEMON.py  Project: andreaspap/LEMON
def min_one_norm(B,initial_seed,seed):

    weight_initial = 1 / float(len(initial_seed))
    weight_later_added = weight_initial / float(0.5)
    difference = len(seed) - len(initial_seed)
    [r,c] = B.shape
    prob = pulp.LpProblem("Minimum one norm", pulp.LpMinimize)
    indices_y = range(0,r)
    y = pulp.LpVariable.dicts("y_s", indices_y, 0)
    indices_x = range(0,c)
    x = pulp.LpVariable.dicts("x_s", indices_x)

    f = dict(zip(indices_y, [1.0]*r))

    prob += pulp.lpSum(f[i] * y[i] for i in indices_y) # objective function
    
    prob += pulp.lpSum(y[s] for s in initial_seed) >= 1

    prob += pulp.lpSum(y[k] for k in seed) >= 1 + weight_later_added * difference

    for j in range(r):
        temp = dict(zip(indices_x, list(B[j, :])))
        prob += y[j] + pulp.lpSum(temp[k] * x[k] for k in indices_x) == 0

    prob.solve()

    print "Status:", pulp.LpStatus[prob.status]
    result = []
    for var in indices_y:
        result.append(y[var].value())
   
    return result 
Example #15
File: lpc.py  Project: pangzy/experiment
    def def_problem(self, queue_a):
        """define lp problem"""
        n = self.n
        tn = self.tn
        tl = self.tl
        b = self.b

        iIdx = range(n)
        tIdx = range(tn)
        ti = range(n)
        si = range(n)
        Ti = range(n)
        Si = range(n)

        for i in xrange(n):
            ti[i] = queue_a[i].at
            si[i] = queue_a[i].tsize
            Ti[i] = queue_a[i].ft
            Si[i] = queue_a[i].size
            iIdx[i] = str(i)

        for t in xrange(tn):
            tIdx[t] = str(t)

        """-----------------------------------
        PuLP variable definition
        varb--bi(t)
        prob--objective and constraints

        objective:
            max:[0~n-1][0~ti-1])sigma bi(t)

        constraints:
            1.any t,[0~n-1] sigma bi(t)   <= B
            2.any i,[0-Ti]  sigma bi(t)*tl>= si
            3.any i,[0~T-1] sigma bi(t)*tl<= Si
        ------------------------------------"""
        print "\ndefine lp variables"
        self.varb = pulp.LpVariable.dicts('b',(iIdx,tIdx),0,b,cat='Integer')

        print "define lp problem"
        self.prob = pulp.LpProblem('Prefetching Schedule',pulp.LpMaximize)

        print "define lp objective"
        self.prob += pulp.lpSum([tl*self.varb[i][t] for i in iIdx for t in tIdx if int(t)<ti[int(i)]])

        print "define constraints on B"
        for t in tIdx:
            self.prob += pulp.lpSum([self.varb[i][t] for i in iIdx]) <= b

        print "define constraints on si"
        for i in iIdx:
            self.prob += pulp.lpSum([tl*self.varb[i][t] for t in tIdx if int(t)<=Ti[int(i)]]) >= si[int(i)]

        print "define constraints on Si"
        for i in iIdx:
            self.prob += pulp.lpSum([tl*self.varb[i][t] for t in tIdx]) <= Si[int(i)]
Example #16
	def get_linear_program_solution(self, c, b, A, x):
		prob = LpProblem("myProblem", LpMinimize)
		prob += lpSum([xp*cp for xp, cp in zip(x, c)]), "Total Cost of Ingredients per can" 

		for row, cell in zip(A, b):
			prob += lpSum(ap*xp for ap, xp in zip(row,  x)) <= cell

		solved = prob.solve()
		return prob
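An illustrative call (data made up; obj stands for an instance of the enclosing class, and a star import from pulp is assumed, as in the method body):

from pulp import LpVariable, value

x = [LpVariable("x0", 0), LpVariable("x1", 0)]
c = [1, 2]          # minimize x0 + 2*x1
A = [[-1, -1]]      # -x0 - x1 <= -1, i.e. x0 + x1 >= 1
b = [-1]
prob = obj.get_linear_program_solution(c, b, A, x)
print(value(prob.objective))  # 1.0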
Example #17

def addFirstLastTaskConstraints(prob, numTasks, numDays, xihtVariables, xhitVariables):
    for t in range(numDays):
        xihtList = []
        xhitList = []
        for i in range(numTasks):
            xihtList.append(xihtVariables[i][t])
            xhitList.append(xhitVariables[i][t])
        prob += pulp.lpSum(xihtList) == pulp.lpSum(xhitList)
        prob += pulp.lpSum(xihtList) <= 1
        prob += pulp.lpSum(xhitList) <= 1
Example #18

def portfolio_lp(
    weights, latest_prices, min_allocation=0.01, total_portfolio_value=10000
):
    """
    For a long only portfolio, convert the continuous weights to a discrete allocation
    using Mixed Integer Linear Programming. This can be thought of as a clever way to round
    the continuous weights to an integer number of shares

    :param weights: continuous weights generated from the ``efficient_frontier`` module
    :type weights: dict
    :param latest_prices: the most recent price for each asset
    :type latest_prices: pd.Series or dict
    :param min_allocation: any weights less than this number are considered negligible,
                           defaults to 0.01
    :type min_allocation: float, optional
    :param total_portfolio_value: the desired total value of the portfolio, defaults to 10000
    :type total_portfolio_value: int/float, optional
    :raises TypeError: if ``weights`` is not a dict
    :raises TypeError: if ``latest_prices`` isn't a series
    :raises ValueError: if not ``0 < min_allocation < 0.3``
    :return: the number of shares of each ticker that should be purchased, along with the amount
             of funds leftover.
    :rtype: (dict, float)
    """
    import pulp

    if not isinstance(weights, dict):
        raise TypeError("weights should be a dictionary of {ticker: weight}")
    if not isinstance(latest_prices, (pd.Series, dict)):
        raise TypeError("latest_prices should be a pd.Series")
    if not 0 < min_allocation < 0.3:
        raise ValueError("min_allocation should be a small positive float (< 0.3)")
    if total_portfolio_value <= 0:
        raise ValueError("total_portfolio_value must be greater than zero")

    m = pulp.LpProblem("PfAlloc", pulp.LpMinimize)
    vals = {}
    realvals = {}
    etas = {}
    abss = {}
    remaining = pulp.LpVariable("remaining", 0)
    for k, w in weights.items():
        if w < min_allocation:
            continue
        vals[k] = pulp.LpVariable("x_%s" % k, 0, cat="Integer")
        realvals[k] = latest_prices[k] * vals[k]
        etas[k] = w * total_portfolio_value - realvals[k]
        abss[k] = pulp.LpVariable("u_%s" % k, 0)
        m += etas[k] <= abss[k]
        m += -etas[k] <= abss[k]
    m += remaining == total_portfolio_value - pulp.lpSum(realvals.values())
    m += pulp.lpSum(abss.values()) + remaining
    m.solve()
    results = {k: val.varValue for k, val in vals.items()}
    return results, remaining.varValue
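An illustrative run (tickers, weights and prices are made up; pandas is assumed to be imported as pd in the enclosing module, since the type check references it):

weights = {"AAA": 0.6, "BBB": 0.4}
latest_prices = {"AAA": 50.0, "BBB": 20.0}   # a dict is accepted alongside pd.Series
shares, leftover = portfolio_lp(weights, latest_prices, total_portfolio_value=1000)
print(shares, leftover)   # {'AAA': 12.0, 'BBB': 20.0} 0.0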
Example #19
    def execute_algorithm(self, input_data_set):

        hosts = input_data_set.Hosts
        vms = input_data_set.VirtualMachines
        alert = input_data_set.Alert

        prob = pulp.LpProblem("test",pulp.LpMinimize)

        # u[h] == 1 iff host h is enabled
        u = []

        x = []

        for host in hosts:

            ulp = pulp.LpVariable("used: %s" % host.Hostname,0,1,'Integer')

            u.append(ulp)

            innerX = []
            x.append(innerX)

            n = []
            m = []
            c = []

            for vm in vms:
                values = vm.getMetrics(host) #todo optimization

                lpVar = pulp.LpVariable("host %s contains %s" % (vm.InstanceName, host.Hostname),0,1,'Integer')
                innerX.append(lpVar)

                prob += ulp >= lpVar

                c.append([lpVar,values["C"]])
                n.append([lpVar,values["N"]])
                m.append([lpVar,values["M"]])

            prob += (pulp.LpAffineExpression(c) <= 1)
            prob += (pulp.LpAffineExpression(n) <= 1)
            prob += (pulp.LpAffineExpression(m) <= 1)


        #every vm on only one host
        for i in range(len(vms)):

            lps = [item[i] for item in x]
            prob += (pulp.lpSum(lps) == 1)


        prob += pulp.lpSum(u)
        GLPK().solve(prob)

        for v in prob.variables():
            print v.name, "=", v.varValue
Example #20
    def add_stoichiometric_constraints(self, weights, S, compounds, reactions,
                                       net_reaction):
        """
            S is a NCxNR matrix where the rows represent compounds and the columns represent reactions.
            b is the linear constraint vector, and is NCx1
            
            compounds is the list of compounds from KEGG (NC long)
            reactions is a list of pairs of RID and direction (NR long)
        """
        self.weights = weights
        self.S = S
        self.compounds = compounds
        self.reactions = reactions
        
        assert S.shape[0] == len(self.compounds)
        assert S.shape[1] == len(self.reactions)

        self.cids = [c.cid for c in self.compounds]

        self.physiological_conc = np.matrix(np.ones((1, len(self.compounds)))) * default_c_mid
        if 1 in self.cids:
            self.physiological_conc[0, self.cids.index(1)] = 1 # [H2O] must be set to 1 in any case
        
        # reaction fluxes are the continuous variables
        self.flux_vars = pulp.LpVariable.dicts("Flux",
                                               [r.name for r in self.reactions],
                                               lowBound=0,
                                               upBound=self.flux_upper_bound,
                                               cat=pulp.LpContinuous)

        # add a linear constraint on the fluxes for each compound (mass balance)
        for c, compound in enumerate(self.compounds):
            reactions_with_c = list(np.nonzero(self.S[c, :])[1].flat)
            if len(reactions_with_c) == 0:
                continue
            
            # If the compound participates in the net reaction, force the mass
            # balance to be -1 times the desired net reaction coefficient
            if compound.cid in net_reaction.sparse:
                mass_balance = net_reaction.sparse[compound.cid]
            else:
                mass_balance = 0.0
                
            # Sum of all fluxes involving this compound, times the stoichiometry
            # of their reactions
            cid_sum = pulp.lpSum([self.S[c, r] * self.flux_vars[self.reactions[r].name]
                                  for r in reactions_with_c])
            
            self.prob.addConstraint(cid_sum == mass_balance,
                                    "C%05d_mass_balance" % compound.cid)
        
        obj = pulp.lpSum([self.flux_vars[self.reactions[r].name]*weight
                          for (r, weight) in self.weights])
        self.prob.setObjective(obj)
Example #21
def opt_with_solver(node_ind, order_dict, travel_t, stay_t, pick_t, require_t, num,
                    mini_start, initial=None, load_check=True):
    # order_dict = {ord:(ori_id, dest_id)}, ord in order_set, ori_id, dest_id in node_ind
    # node_ind = range(n), travel_t = {(i,j): travel time between i and j}
    # initial = (given start id, given load)
    big_m = 10000
    eps = 1.0/10**7
    n = len(node_ind)
    inter_n = [(i, j) for i in node_ind for j in node_ind if j != i]
    off_time = lp.LpVariable('The route-off time')
    p = lp.LpVariable.dicts('Punish cost', node_ind, lowBound=0)
    x = lp.LpVariable.dicts('Route variable', inter_n, cat='Binary')
    o = lp.LpVariable.dicts('Start-point flag', node_ind, cat='Binary')
    d = lp.LpVariable.dicts('End-point flag', node_ind, cat='Binary')
    a = lp.LpVariable.dicts('Arrival time', node_ind)
    l = lp.LpVariable.dicts('Leave time', node_ind)
    t = lp.LpVariable.dicts('Order count', node_ind, 0, n-1+eps)
    if load_check:
        load = lp.LpVariable.dicts('Arrival load', node_ind, 0, MAX_LOADS+eps)
    else:
        load = lp.LpVariable.dicts('Arrival load', node_ind, 0)
    prob = lp.LpProblem('Optimize a route', lp.LpMinimize)
    # Objective
    prob += off_time + lp.lpSum(p[i] for i in node_ind)
    # Constraints
    for j in node_ind:
        prob += lp.lpSum(x[(i, j)] for i in node_ind if i != j) == 1 - o[j]
    for i in node_ind:
        prob += lp.lpSum(x[(i, j)] for j in node_ind if j != i) == 1 - d[i]
    prob += lp.lpSum(o[i] for i in node_ind) == 1
    prob += lp.lpSum(d[i] for i in node_ind) == 1
    if not (initial is None):
        prob += o[initial[0]] == 1
        prob += load[initial[0]] == initial[1]
    for order in order_dict:
        od = order_dict[order]
        prob += a[od[1]] >= l[od[0]]
    for i in node_ind:
        prob += off_time >= l[i]
        prob += l[i] >= a[i] + stay_t[i]
        prob += l[i] >= pick_t[i]
        prob += a[i] >= mini_start[i]
        prob += p[i] >= PUNISH_CO*(a[i] - require_t[i])
    for i, j in inter_n:
        prob += a[j] >= l[i] + travel_t[(i, j)] + big_m*(x[(i, j)] - 1)
        prob += t[j] >= t[i] + 1 + big_m*(x[(i, j)] - 1)
        prob += load[j] >= load[i] + num[i] + big_m*(x[(i, j)] - 1)
    prob.solve(lp.CPLEX(msg=1, timelimit=CPLEX_TIME_LIMIT, options=['set logfile cplex/cplex%d.log' % os.getpid()]))
    # set threads 100
    if lp.LpStatus[prob.status] != 'Infeasible':
        sol_list = [int(round(t[i].varValue)) for i in node_ind]
        return lp.value(prob.objective), sol_list, lp.LpStatus[prob.status]
    else:
        return None, None, lp.LpStatus[prob.status]
Example #22
def create_lscp_model(coverage_dict, model_file=None, delineator="$", ):
    """
    Creates a LSCP (Location set covering problem) using the provided coverage and
    parameters. Writes a .lp file which can be solved with Gurobi

    Church, R., & Murray, A. (2009). Coverage Business Site Selection, Location
    Analysis, and GIS (pp. 209-233). Hoboken, New Jersey: Wiley.

    :param coverage_dict: (dictionary) The coverage to use to generate the model
    :param model_file: (string) The model file to output
    :param delineator: (string) The character(s) to use to delineate the layer from the ids
    :return: (Pulp problem) The generated problem to solve
    """
    validate_coverage(coverage_dict, ["coverage"], ["binary"])
    if not isinstance(coverage_dict, dict):
        raise TypeError("coverage_dict is not a dictionary")
    if model_file and not (isinstance(model_file, str)):
        raise TypeError("model_file is not a string")
    if not isinstance(delineator, str):
        raise TypeError("delineator is not a string")

    # create the variables
    demand_vars = {}
    for demand_id in coverage_dict["demand"]:
        demand_vars[demand_id] = pulp.LpVariable("Y{}{}".format(delineator, demand_id), 0, 1, pulp.LpInteger)
    facility_vars = {}
    for facility_type in coverage_dict["facilities"]:
        facility_vars[facility_type] = {}
        for facility_id in coverage_dict["facilities"][facility_type]:
            facility_vars[facility_type][facility_id] = pulp.LpVariable(
                "{}{}{}".format(facility_type, delineator, facility_id), 0, 1, pulp.LpInteger)
    # create the problem
    prob = pulp.LpProblem("LSCP", pulp.LpMinimize)
    # Create objective, minimize number of facilities
    to_sum = []
    for facility_type in coverage_dict["facilities"]:
        for facility_id in coverage_dict["facilities"][facility_type]:
            to_sum.append(facility_vars[facility_type][facility_id])
    prob += pulp.lpSum(to_sum)
    # add coverage constraints
    for demand_id in coverage_dict["demand"]:
        to_sum = []
        for facility_type in coverage_dict["demand"][demand_id]["coverage"]:
            for facility_id in coverage_dict["demand"][demand_id]["coverage"][facility_type]:
                to_sum.append(facility_vars[facility_type][facility_id])
        # Hack to get model to "solve" when infeasible with GLPK.
        # Pulp will automatically add dummy variables when the sum is empty, since these are all the same name,
        # it seems that GLPK doesn't read the lp problem properly and fails
        if not to_sum:
            to_sum = [pulp.LpVariable("__dummy{}{}".format(delineator, demand_id), 0, 0, pulp.LpInteger)]
        prob += pulp.lpSum(to_sum) >= 1, "D{}".format(demand_id)
    if model_file:
        prob.writeLP(model_file)
    return prob
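A sketch of the coverage_dict shape, inferred from the loops in the function body; validate_coverage may demand extra metadata not shown here, so treat this as illustrative only:

coverage = {
    "demand": {
        "d1": {"coverage": {"facility": {"f1": 1}}},
        "d2": {"coverage": {"facility": {"f1": 1, "f2": 1}}},
    },
    "facilities": {"facility": {"f1": 1, "f2": 1}},
}
prob = create_lscp_model(coverage, model_file="lscp.lp")
prob.solve()
print(pulp.LpStatus[prob.status])   # f1 alone covers both demands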
Example #23

def pe185():
    """
    Modelling as an integer programming problem.
    Then using PuLP to solve it. It's really fast, just 0.24 seconds. 
    For details, see https://pythonhosted.org/PuLP/index.html
    """
    
    from pulp import LpProblem, LpVariable, LpMinimize, LpInteger, lpSum, value

    constraints = [
        ('2321386104303845', 0),
        ('3847439647293047', 1),
        ('3174248439465858', 1),
        ('8157356344118483', 1),
        ('6375711915077050', 1),
        ('6913859173121360', 1),
        ('4895722652190306', 1),
        ('5616185650518293', 2),
        ('4513559094146117', 2),
        ('2615250744386899', 2),
        ('6442889055042768', 2),
        ('2326509471271448', 2),
        ('5251583379644322', 2),
        ('2659862637316867', 2),
        ('5855462940810587', 3),
        ('9742855507068353', 3),
        ('4296849643607543', 3),
        ('7890971548908067', 3),
        ('8690095851526254', 3),
        ('1748270476758276', 3),
        ('3041631117224635', 3),
        ('1841236454324589', 3)
    ]

    VALs = map(str, range(10))
    LOCs = map(str, range(16))
    choices = LpVariable.dicts("Choice", (LOCs, VALs), 0, 1, LpInteger)

    prob = LpProblem("pe185", LpMinimize)
    prob += 0, "Arbitrary Objective Function"

    for s in LOCs:
        prob += lpSum([choices[s][v] for v in VALs]) == 1, ""

    for c, n in constraints:
        prob += lpSum([choices[str(i)][v] for i,v in enumerate(c)]) == n, ""

    prob.writeLP("pe185.lp")
    prob.solve()
    res = int(''.join(v for s in LOCs for v in VALs if value(choices[s][v])))

    # answer: 4640261571849533
    return res
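The same encoding on a toy two-digit instance (made-up guesses whose unique answer is '22'):

from pulp import LpProblem, LpVariable, LpMinimize, LpInteger, lpSum, value

toy = [('11', 0), ('12', 1), ('21', 1)]
VALs = [str(v) for v in range(10)]
LOCs = [str(s) for s in range(2)]
choices = LpVariable.dicts("C", (LOCs, VALs), 0, 1, LpInteger)
prob = LpProblem("toy185", LpMinimize)
prob += 0
for s in LOCs:
    prob += lpSum([choices[s][v] for v in VALs]) == 1
for g, n in toy:
    prob += lpSum([choices[str(i)][v] for i, v in enumerate(g)]) == n
prob.solve()
print(''.join(v for s in LOCs for v in VALs if value(choices[s][v])))  # 22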
Example #24
File: coke_func.py  Project: coin-or/Dip
def formulate(cp):

    prob = dippy.DipProblem("Coke",
                            display_mode = 'xdot',
#                           layout = 'bak',
                            display_interval = None,
                            )

    # create variables
    LOC_SIZES = [(l, s) for l in cp.LOCATIONS
                        for s in cp.SIZES]
    buildVars = LpVariable.dicts("Build", LOC_SIZES, cat=LpBinary)

    # create arcs
    flowVars = LpVariable.dicts("Arcs", cp.ARCS)
    BIG_M = max(sum(cp.supply.values()), sum(cp.demand.values()))
    for a in cp.ARCS:
        flowVars[a].bounds(0, BIG_M)

    # objective
    prob += 1e6 * lpSum(buildVars[(l, s)] * cp.build_costs[s] \
                        for (l, s) in LOC_SIZES) + \
                  lpSum(flowVars[(s, d)] * cp.transport_costs[(s, d)] \
                        for (s, d) in cp.ARCS), "min"

    # plant availability - assumes that SIZES are numeric,
    # which they should be
    for loc in cp.LOCATIONS:
        prob += lpSum(flowVars[(loc, i)] for i in cp.CUSTOMERS) \
             <= lpSum(buildVars[(loc, s)] * s for s in cp.SIZES)

    # one size
    for loc in cp.LOCATIONS:
        prob += lpSum(buildVars[(loc, s)] for s in cp.SIZES) == 1

    # conserve flow (mines)
    # flows are in terms of tonnes of coke
    for m in cp.MINES:
        prob += lpSum(flowVars[(m, j)] for j in cp.LOCATIONS) \
             <= cp.supply[m]

    # conserve flow (locations)
    # convert from coal to coke
    for loc in cp.LOCATIONS:
        prob += lpSum(flowVars[(m, loc)] for m in cp.MINES) - \
                cp.conversion_factor * \
                lpSum(flowVars[(loc, c)] for c in cp.CUSTOMERS) \
             >= 0

    for c in cp.CUSTOMERS:
        prob += lpSum(flowVars[(loc, c)] for loc in cp.LOCATIONS) \
             >= cp.demand[c]

    prob.cp = cp
    prob.buildVars = buildVars
    prob.flowVars = flowVars
    
    return prob
Example #25
def knapsack01(obj, weights, capacity):
    """ 0/1 knapsack solver, maximizes profit. weights and capacity integer """
        
    debug_subproblem = False
    
    assert len(obj) == len(weights)
    n = len(obj)
    if n == 0:
        return 0, []

    if debug_subproblem:
        relaxation = LpProblem('relaxation', LpMaximize)
        relax_vars = [str(i) for i in range(n)]
        var_dict   = LpVariable.dicts("", relax_vars, 0, 1, LpBinary)
        relaxation += (lpSum(var_dict[str(i)] * weights[i] for i in range(n)) 
                       <= capacity)
        relaxation += lpSum(var_dict[str(i)] * obj[i] for i in range(n))
        relaxation.solve()
        relax_obj = value(relaxation.objective)

        tol = 1e-6  # tolerance for treating a relaxation value as nonzero
        solution = [i for i in range(n) if var_dict[str(i)].varValue > tol]

        print relax_obj, solution


    c = [[0]*(capacity+1) for i in range(n)]
    added = [[False]*(capacity+1) for i in range(n)]
    # c [items, remaining capacity]
    # important: this code assumes strictly positive objective values
    for i in range(n):
        for j in range(capacity+1):
            if (weights[i] > j):
                c[i][j] = c[i-1][j]
            else:
                c_add = obj[i] + c[i-1][j-weights[i]]
                if c_add > c[i-1][j]:
                    c[i][j] = c_add
                    added[i][j] = True
                else:
                    c[i][j] = c[i-1][j]

    # backtrack to find solution
    i = n-1
    j = capacity

    solution = []
    while i >= 0 and j >= 0:
        if added[i][j]:
            solution.append(i)
            j -= weights[i]
        i -= 1
        
    return c[n-1][capacity], solution
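A quick check with made-up data (capacity 6 is best filled by items 1 and 2):

profit, picked = knapsack01(obj=[10, 13, 8], weights=[3, 4, 2], capacity=6)
print(profit, picked)   # 21 [2, 1]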
Example #26

def addConnectivityConstraints(prob, xijtVariables, xihtVariables, xhitVariables, yitkVariables, numTasks, numDays, yiVariables):
    for t in range(numDays):
        for i in range(numTasks):
            xjitList = []
            xijtList = []
            for j in range(numTasks):
                if i != j:
                    xjitList.append(xijtVariables[j][i][t])
                    xijtList.append(xijtVariables[i][j][t])
            prob += pulp.lpSum(xijtList) + xihtVariables[i][t] == pulp.lpSum(xjitList) + xhitVariables[i][t]  # for job i, sum xijt == sum xjit

            yitkList = []
            for k in range(len(yitkVariables[i][t])):
                yitkList.append(yitkVariables[i][t][k])
            prob += pulp.lpSum(xijtList) + xihtVariables[i][t] == pulp.lpSum(yitkList)  # for job i, sum xijt == sum yitk
Example #27
def prepare_problem(A, b, c):
    """
    Создаёт проблемы по заданным матрице A,
    вектору b и оптимизируемой функции
    """
    problem = LpProblem(sense = pulp.constants.LpMaximize)

    x_list = [LpVariable("x" + str(i + 1), 0) for i in xrange(len(A[0]))]

    problem += lpSum(ci * xi for ci, xi in zip(c, x_list))

    for aj, bj in zip(A, b):
        problem += lpSum([aij * xi for aij, xi in zip(aj, x_list)]) == bj

    return problem
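A toy equality-form instance (note the function itself is Python 2, using xrange):

from pulp import value

A = [[1, 1]]        # x1 + x2 == 1
b = [1]
c = [1, 1]          # maximize x1 + x2
problem = prepare_problem(A, b, c)
problem.solve()
print(value(problem.objective))  # 1.0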
Example #28
    def _MakeOBDProblemDual(self):
        """Create a CVXOPT problem for finding the Maximal Thermodynamic
        Driving Force (OBD).
       
        Does not set the objective function... leaves that to the caller.
       
        Args:
            c_range: a tuple (min, max) for concentrations (in M).
            bounds: a list of (lower bound, upper bound) tuples for compound
                concentrations.
       
        Returns:
            A tuple (dgf_var, motive_force_var, problem_object).
        """
        # Define and apply the constraints on the concentrations
        ln_conc_lb, ln_conc_ub = self._MakeLnConcentratonBounds()

        # Create the driving force variable and add the relevant constraints
        A, b, c = self._MakeDrivingForceConstraints(ln_conc_lb, ln_conc_ub)
       
        lp = pulp.LpProblem("OBD", pulp.LpMinimize)
        
        w = pulp.LpVariable.dicts("w", 
                                  ["%d" % i for i in xrange(self.Nr)],
                                  lowBound=0)

        z = pulp.LpVariable.dicts("z", 
                                  ["%d" % i for i in xrange(self.Nc)],
                                  lowBound=0)

        u = pulp.LpVariable.dicts("u", 
                                  ["%d" % i for i in xrange(self.Nc)],
                                  lowBound=0)
        
        y = [w["%d" % i] for i in xrange(self.Nr)] + \
            [z["%d" % i] for i in xrange(self.Nc)] + \
            [u["%d" % i] for i in xrange(self.Nc)]
        
        for i in xrange(A.shape[1]):
            row = [A[j, i] * y[j] for j in xrange(A.shape[0])]
            lp += (pulp.lpSum(row) == c[i, 0]), "dual_%02d" % i

        objective = pulp.lpSum([b[i] * y[i] for i in xrange(A.shape[0])])
        lp.setObjective(objective)
        
        #lp.writeLP("../res/obd_dual.lp")
        
        return lp, w, z, u
Example #29
def solve_under_coverage(graph, min_coverage=80):

    prob = LpProblem("granularity selection", LpMinimize)
    codelet_vars = LpVariable.dicts("codelet",
            graph,
            lowBound=0,
            upBound=1,
            cat=LpInteger)

    # Objective function: minimize the total replay cost of selected codelets

    # Compute replay time
    for n,d in graph.nodes(data=True):
      d['_total_replay_cycles'] = 0
      for inv in d['_invocations']:
        d['_total_replay_cycles'] = d['_total_replay_cycles'] + float(inv["Invivo (cycles)"])

    prob += lpSum([codelet_vars[n]*d['_total_replay_cycles'] for n,d in graph.nodes(data=True)])

    # and with good coverage
    prob += (lpSum([codelet_vars[n]*d['_coverage'] for n,d in graph.nodes(data=True)]) >= min_coverage)

    # selected codelets should match
    for n,d in graph.nodes(data=True):
        if not d['_matching']:
            prob += codelet_vars[n] == 0

    # Finally we should never include both the children and the parents
    for dad in graph.nodes():
        for son in graph.nodes():
            if not dad in nx.ancestors(graph, son):
                continue
            # We cannot select dad and son at the same time
            prob += codelet_vars[dad] + codelet_vars[son] <= 1

    #prob.solve(GLPK())
    prob.solve()
    if (LpStatus[prob.status] != 'Optimal'):
        raise Unsolvable()

    for v in prob.variables():
        assert v.varValue == 1.0 or v.varValue == 0.0
        if v.varValue == 1.0:

            for n,d in graph.nodes(data=True):
                if ("codelet_"+str(n)) == v.name:
                    d["_selected"] = True
                    yield n
Example #30
def formulate(bpp):
    prob = dippy.DipProblem("Bin Packing",
                            display_mode = 'xdot',
#                           layout = 'bak',
                            display_interval = None,
                            )

    assign_vars = LpVariable.dicts("x",
                                   [(i, j) for i in bpp.BINS
                                    for j in bpp.ITEMS],
                                   cat=LpBinary)
    use_vars    = LpVariable.dicts("y", bpp.BINS, cat=LpBinary)
    waste_vars  = LpVariable.dicts("w", bpp.BINS, 0, None)

    prob += lpSum(waste_vars[i] for i in bpp.BINS), "min_waste"

    for j in bpp.ITEMS:
        prob += lpSum(assign_vars[i, j] for i in bpp.BINS) == 1

    for i in bpp.BINS:
        prob.relaxation[i] += (lpSum(bpp.volume[j] * assign_vars[i, j]
                                for j in bpp.ITEMS) + waste_vars[i] 
                                == bpp.capacity * use_vars[i])

    for i in bpp.BINS:
        for j in bpp.ITEMS:
            prob.relaxation[i] += assign_vars[i, j] <= use_vars[i]

    if Bin_antisymmetry:
        for m in range(0, len(bpp.BINS) - 1):
            prob += use_vars[bpp.BINS[m]] >= use_vars[bpp.BINS[m + 1]]

    if Item_antisymmetry:
        for m in range(0, len(bpp.BINS)):
            for n in range(0, len(bpp.ITEMS)):
                if m > n:
                    i = bpp.BINS[m]
                    j = bpp.ITEMS[n]
                    prob += assign_vars[i, j] == 0

    # Attach the problem data and variable dictionaries
    # to the DipProblem 
    prob.bpp         = bpp
    prob.assign_vars = assign_vars
    prob.use_vars    = use_vars
    prob.waste_vars  = waste_vars

    return prob
Example #31
    def IP_solution(self):
        """
        This method implements the IP solution.
        Args:
            - self: Sudoku instance, to be solved using IP.
        Returns:
            - flag: boolean, whether the Sudoku is solvable.
            If True, the completed Sudoku is available through the attribute
            "_solution" and the completion time through "_duration".
        """

        # List of available digits
        Digits = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]

        Values = Digits
        Rows = Digits
        Columns = Digits

        # Creating the block list
        Blocks = []
        for i in range(3):
            for j in range(3):
                Blocks += [[(Rows[3 * i + k], Columns[3 * j + l])
                            for k in range(3) for l in range(3)]]

        # We need to create the IP problem
        prob = pulp.LpProblem("Sudoku solver", pulp.LpMinimize)

        # Then we create the problem variables
        choices = pulp.LpVariable.dicts("Choice", (Rows, Columns, Values), 0,
                                        1, pulp.LpInteger)

        # We define the objective function which is 0 here
        prob += 0, "Objective Function"

        # Defining the constraint: only one value can be put in a cell
        for r in Rows:
            for c in Columns:
                prob += pulp.lpSum([choices[r][c][v] for v in Values]) == 1, ""

        for v in Values:
            # Each value must occur exactly once in each row
            for r in Rows:
                prob += pulp.lpSum([choices[r][c][v]
                                    for c in Columns]) == 1, ""

            # Each value must occur exactly once in each column
            for c in Columns:
                prob += pulp.lpSum([choices[r][c][v] for r in Rows]) == 1, ""

            # Each value must occur exactly once in each block
            for b in Blocks:
                prob += pulp.lpSum([choices[r][c][v] for (r, c) in b]) == 1, ""

        # We need to add the starting numbers as constraints
        grid = self.grid
        for r in range(len(grid)):
            for c in range(len(grid[0])):
                value = grid[r][c]
                if value != 0:
                    prob += choices[str(int(r + 1))][str(int(c + 1))][str(
                        int(value))] == 1, ""

        solve_value = prob.solve()

        # flag is True if solve_value evaluates to 1, i.e. the Sudoku can be
        # solved
        flag = solve_value == 1

        if flag:
            solution_grid = np.zeros((9, 9))
            for r in Rows:
                for c in Columns:
                    for v in Values:
                        if choices[r][c][v].value() == 1.0:
                            solution_grid[int(r) - 1][int(c) - 1] = v
            self._solution = solution_grid
        return flag
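A hypothetical driver, assuming a Sudoku class whose constructor takes a 9x9 grid of ints (0 = blank) and stores it on .grid:

grid = [[0] * 9 for _ in range(9)]   # an empty puzzle, for illustration
puzzle = Sudoku(grid)                # constructor signature assumed
if puzzle.IP_solution():
    print(puzzle._solution)          # completed 9x9 numpy array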
Example #32
 def set_objective(self, variables, coefficients):
     self.prob += lpSum([variable * coefficient for variable, coefficient in zip(variables, coefficients)])
Example #33

def baruah_algorithm(wcet):
    task_count, core_count = wcet.shape

    # Create the model
    model = LpProblem(name="LPR-feas", sense=LpMinimize)

    # Initialize the decision variables
    x_variables = dict()
    for t in range(task_count):
        for j in range(core_count):
            if (t, j) not in x_variables:
                x_variables[(t, j)] = LpVariable(name="x_task{}_cpu{}".format(
                    t, j),
                                                 lowBound=0)

    u_variable = LpVariable(name="U", lowBound=0)

    # Add the constraints to the model
    for t in range(task_count):
        model += (lpSum([x_variables[(t, j)] for j in range(core_count)]) == 1,
                  "One_assignment_constraint{}".format(t))

    for j in range(core_count):
        model += (lpSum(
            [x_variables[(t, j)] * wcet[t, j]
             for t in range(task_count)]) <= u_variable,
                  "EDF constraint on CPU{}".format(j))

    # Add the objective function to the model
    obj_func = lpSum(u_variable)
    model += obj_func

    # The problem data is written to an .lp file
    # model.writeLP("Baruah_LP.lp")

    # Solve the problem
    # status = model.solve()
    status = model.solve(PULP_CBC_CMD(msg=0, timeLimit=900))

    #print("-------------------------------------------------------")
    #print(f"status: {model.status}, {LpStatus[model.status]}")
    #print(f"objective: {model.objective.value()}")

    # create bipartite graph
    B = nx.Graph()

    partitioning = dict()
    for t in range(task_count):
        B.add_nodes_from(["task{}".format(t)], bipartite=0)
        partitioning["task{}".format(t)] = None
    for j in range(core_count):
        B.add_nodes_from(["cpu{}".format(j)], bipartite=1)

    for var in model.variables():
        #print(f"{var.name}: {var.value()}")

        if var.name == "U":
            if var.value() > 1:
                #raise Exception("Invalid LP solution!")
                return np.inf, "Invalid LP solution!"

        else:
            if var.value() > 0:
                #print(var.name)
                string_parts = var.name.split("_")
                task = int(string_parts[1][4:])
                cpu = int(string_parts[2][3:])
                B.add_edges_from([("task{}".format(task), "cpu{}".format(cpu))
                                  ])

    #X, Y = bipartite.sets(B)
    draw = False
    try:
        X, Y = bipartite.sets(B)
    except:
        draw = False
        #plt.close()
        #nx.draw(B, with_labels=True)
        #plt.show()

        X = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0}
        Y = set(B) - X

    #nx.draw(B, with_labels=True, pos = nx.drawing.layout.bipartite_layout(B, X) )
    #plt.show()
    #plt.close()

    #nx.draw(B, with_labels=True)
    #plt.show()

    # delete exact mappings' tasks
    task_vertices_to_delete = []
    for task in X:
        #print("Degree of {} is {}".format(task,B.degree[task]))
        if B.degree[task] == 1:
            task_vertices_to_delete.append(task)
            partitioning[task] = list(B.neighbors(task))[0]
    for task in task_vertices_to_delete:
        B.remove_node(task)

    #plt.close()

    #if draw:
    #    nx.draw(B, with_labels=True)
    #    plt.show()

    # TODO: find cycles

    # create bipartite graph
    """
    B = nx.Graph()
    B.add_nodes_from(["task0"], bipartite=0)
    B.add_nodes_from(["cpu0"], bipartite=1)
    B.add_nodes_from(["task1"], bipartite=0)
    B.add_nodes_from(["cpu1"], bipartite=1)
    B.add_edges_from([("task0", "cpu0")])
    B.add_edges_from([("task0", "cpu1")])
    B.add_edges_from([("task1", "cpu0")])
    B.add_edges_from([("task1", "cpu1")])
    """

    cycle = False
    try:
        cycle_edges = list(nx.find_cycle(B, orientation="ignore"))
        print(cycle_edges)
        cycle = True
    except nx.exception.NetworkXNoCycle:
        pass

    if cycle:
        raise Exception("Cycle is found")

    #unassigned_tasks, cpus = bipartite.sets(B)
    unassigned_tasks = {
        n
        for n, d in B.nodes(data=True) if d["bipartite"] == 0
    }
    cpus = set(B) - unassigned_tasks
    while len(unassigned_tasks) != 0:

        for t, c in partitioning.items():
            if c is not None:
                try:
                    B.remove_node(t)
                except nx.exception.NetworkXError:
                    pass

        root = None
        tasks = {n for n, d in B.nodes(data=True) if d["bipartite"] == 0}
        cpus = set(B) - tasks
        for task in tasks:
            if B.degree[task] == 1:
                root = task
        if root is None:
            root_cpu = random.choice(list(cpus))
            B.add_node("arbitrary_task", bipartite=0)
            B.add_edges_from([("arbitrary_task", root_cpu)])
            root = "arbitrary_task"
            unassigned_tasks.add("arbitrary_task")

        #if draw:
        #    plt.close()
        #    nx.draw(B, with_labels=True)
        #    plt.show()

        visited_nodes = [root]
        unvisited_nodes = []
        task_cpus = B.neighbors(root)
        task_cpus = list(task_cpus)
        unvisited_nodes.extend(task_cpus)
        unvisited_nodes = list(set(unvisited_nodes))
        mapped_cpu = random.choice(task_cpus)
        #print("task {} -> cpu {}".format(root,mapped_cpu))
        partitioning[root] = mapped_cpu
        unassigned_tasks.remove(root)
        while unvisited_nodes != []:
            start_node = unvisited_nodes[0]
            visited_nodes.append(start_node)
            unvisited_nodes.remove(start_node)
            neigh = B.neighbors(start_node)
            for node in neigh:
                if node not in visited_nodes:
                    unvisited_nodes.append(node)

            if "task" in start_node:
                task_cpus = B.neighbors(start_node)
                task_cpus = [i for i in task_cpus if i not in visited_nodes]
                mapped_cpu = random.choice(task_cpus)
                #print("task {} -> cpu {}".format(start_node, mapped_cpu))
                partitioning[start_node] = mapped_cpu
                unassigned_tasks.remove(start_node)

    #for name, constraint in model.constraints.items():
    #    print(f"{name}: {constraint.value()}")

    #print("Solver: {}".format(model.solver))

    if status == 1:
        used_cpus = []
        try:
            partitioning.pop("arbitrary_task")
        except KeyError:
            pass
        #check if partitioning valid
        for task, cpu in partitioning.items():
            if cpu is None:
                raise Exception("There is no mapping")
            used_cpus.append(cpu)
        for cpu_iter in range(core_count):
            cpu = "cpu{}".format(cpu_iter)

            sum_utilization = 0
            for task, cpu_ in partitioning.items():
                if cpu == cpu_:
                    sum_utilization += wcet[int(task[4:])][cpu_iter]
            if sum_utilization > 1.0001:
                return np.inf, "Overloaded cpu '{}': {}".format(
                    cpu, sum_utilization)
        used_cpus = list(set(used_cpus))
        return len(used_cpus), "OK"

    else:
        # print("OPT is not optimal solution! Reason: {}".format(LpStatus[model.status]))
        return np.inf, LpStatus[model.status]
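An illustrative run, assuming the module-level imports the function relies on (pulp, networkx, numpy, random) are in place; wcet[t, j] is task t's utilization on core j:

import numpy as np

wcet = np.array([[0.5, 0.9],
                 [0.9, 0.5]])
cores_used, msg = baruah_algorithm(wcet)
print(cores_used, msg)   # expected: 2 OK (each task lands on its cheaper core)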
Example #34
File: task2.py  Project: elgator/linprog
def accumulate(var, i) -> pulp.pulp.LpAffineExpression:
    return pulp.lpSum([var[k] for k in range(i + 1)])
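A quick illustration of the prefix-sum expression it returns (variable dict made up):

import pulp

var = pulp.LpVariable.dicts("v", range(4), lowBound=0)
expr = accumulate(var, 2)   # v_0 + v_1 + v_2
print(expr)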
Example #35
prob = dippy.DipProblem("Coke", LpMinimize)

# create variables
buildVars = LpVariable.dicts("Build", LOC_SIZES, None, \
                             None, LpBinary)
prob.buildVars = buildVars

# create arcs
flowVars = LpVariable.dicts("Arcs", ARCS)
for a in ARCS:
    flowVars[a].bounds(0, BIG_M)

prob.SIZES = SIZES

# objective
prob += 1e6 * lpSum(buildVars[(l, s)] * SIZE_COSTS[s] \
                    for (l, s) in LOC_SIZES) + \
              lpSum(flowVars[(s, d)] * ARC_COSTS[(s, d)] \
                    for (s, d) in ARCS), "min"

# plant availability
for loc in LOCATIONS:
    prob += lpSum(flowVars[(loc, i)] for i in CUSTOMERS) \
            <= lpSum(buildVars[(loc, s)] *s for s in SIZES)

# one size
for loc in LOCATIONS:
    prob += lpSum(buildVars[(loc, s)] for s in SIZES) == 1

# conserve flow (mines)
# flows are in terms of tonnes of coke
for m in MINES:
Example #36
    def __addVarConstraint(self, zInit):
        '''
        Add constraints on the variables (Includes state transition constraint)
        '''
        # Constraints on state parameters
        # x[0] == xInit
        for x_var, xi in zip(self.x[0].values(), self.xInit):
            self.prob.addConstraint(x_var == xi)

        for x_var, xi in zip(self.x[self.__N].values(), self.xT):
            self.prob.addConstraint(x_var == xi)

        if False:  #if self.margin != None:
            # Add the boundary constraints
            xmin = min(self.xInit[0], self.xT[0]) - self.margin
            xmax = max(self.xInit[0], self.xT[0]) + self.margin
            ymin = min(self.xInit[1], self.xT[1]) - self.margin
            ymax = max(self.xInit[1], self.xT[1]) + self.margin

            for idx in range(1, self.__N):
                for x_var, xm in zip(self.x[idx].values(), [xmin, ymin]):
                    self.prob.addConstraint(x_var >= xm)
                for x_var, xm in zip(self.x[idx].values(), [xmax, ymax]):
                    self.prob.addConstraint(x_var <= xm)

        # Constraints on intermediate variables
        if self.receding_horizon:
            limit = self.__N - 1
        else:
            limit = self.__N

        for k in range(limit):
            # absu >= u
            # absu + u >= 0
            for i in range(self.__nu):
                self.prob.addConstraint(self.absu[k][i] - self.u[k][i] >= 0)
                self.prob.addConstraint(self.absu[k][i] + self.u[k][i] >= 0)

            # State Transition modelled as a constraint
            # x[k+1] == A*x[k] + B*u[:,k]
            for x_var, a, b in zip(self.x[k + 1].values(), self.__A, self.__B):
                self.prob.addConstraint((x_var - pulp.lpSum([(ai * xi) for ai, xi in zip(a, self.x[k].values())]) \
                            - pulp.lpSum([(bi * ui) for bi, ui in zip(b, self.u[k].values())])) == 0)

        # Lower bound on the horizon radius
        if self.receding_horizon:
            self.prob.addConstraint(self.dlast >= 0)

        # Constrain the distance between adjacent points
        for kk in range(1, self.__N + 1 - self.receding_horizon):
            currx = self.x[kk].values()
            prevx = self.x[kk - 1].values()
            self.prob.addConstraint(self.d[kk - 1] >= 0)
            for side in range(self.poly_nsides):
                # parameters that determine the side of the polygon
                line_angle = [math.cos(2*side*math.pi/self.poly_nsides), \
                                        math.sin(2*side*math.pi/self.poly_nsides)]

                # Bound the distance between consecutive points by d[kk - 1]
                # via the polygonal (side-wise) approximation of its length
                self.prob.addConstraint(
                    pulp.lpSum([
                        m * (x1 - x2)
                        for m, x1, x2 in zip(line_angle, currx, prevx)
                    ]) <= self.d[kk - 1])

        if self.receding_horizon:
            xlaststep = self.x[self.__N - 1].values()
            xgoal = self.x[self.__N].values()
            for side in range(500):
                # parameters that determine the side of the polygon
                line_angle = [math.cos(2*side*math.pi/self.poly_nsides), \
                                        math.sin(2*side*math.pi/self.poly_nsides)]

                # Add constraints between the last step and the goal point
                # This ensures that the goal is within the horizon of the last step
                self.prob.addConstraint(
                    pulp.lpSum([
                        m * (x1 - x2)
                        for m, x1, x2 in zip(line_angle, xgoal, xlaststep)
                    ]) <= self.dlast)

        # \sum_{i} z_{i} == dim(z_{i}) - 1 constraint
        for k in range(limit):
            for i in range(self.__nObst):
                self.prob.addConstraint(pulp.lpSum([self.z[k][i][j] for j in range(self.__dObst)]) == \
                                         self.__dObst-1, name='z_%d_%d'%(k, i))
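The line_angle loops above are a standard linear outer formulation of a Euclidean distance bound: for each side angle theta = 2*pi*s/n_sides, requiring cos(theta)*dx + sin(theta)*dy <= d forces d to be at least the largest projection of the step, which approaches its 2-norm as the number of sides grows. A minimal standalone sketch of the idea (hypothetical fixed step, not part of the class above):

import math
import pulp

# Smallest d satisfying the polygon constraints for a fixed step (dx, dy).
dx, dy, nsides = 3.0, 4.0, 16
prob = pulp.LpProblem('polygon_norm', pulp.LpMinimize)
d = pulp.LpVariable('d', lowBound=0)
prob += d  # objective: minimize the distance bound
for side in range(nsides):
    angle = 2 * side * math.pi / nsides
    prob += math.cos(angle) * dx + math.sin(angle) * dy <= d
prob.solve()
print(pulp.value(d))  # ~4.95, approaching ||(3, 4)|| = 5 as nsides grows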
Example #37
 def addAllZConstraints(self):
     for k in range(self.__N):
         for i in range(self.__nObst):
             self.prob.addConstraint(pulp.lpSum([self.z[k][i][j] for j in range(self.__dObst)]) == \
                                      self.__dObst-1, name='z_%d_%d'%(k, i))
     return
Example #38
#Csums=pulp.LpVariable.dicts("line_prod",(prod_line,orderList),0,None, pulp.LpInteger)
#r=pulp.LpVariable.dicts("release",(prod_line,orderList),0)
#compD=pulp.LpVariable.dicts("compDate",(prod_line,orderList),0)
#CLines=pulp.LpVariable("CLines",[j for j in line_date] ,0) #total amount of all lines by day
#Pday=pulp.LpVariable.dicts("Processdays",(prod_line,orderList),0)
#AM=pulp.LpVariable.dicts('planAmountEveryorder',(prod_line,orderList,plan_dates),0)
#Create the 'prob' variable to contain the problem data
#prob=pulp.LpProblem("The APS Problem",pulp.LpMinimize)

#objective function: consider order priority and leadtime
#eps=1e-2 
#lambda compD[l][o] if compD[l][o]<=len(plan_dates) else len(plan_dates)
#adt.model_total_volume(order_spd[(order_spd['order_id']==o)&(order_spd['line_no']==l)][['day_process','num_by_day']],adt.prod_days(compD[l][o],len(plan_dates),r[l][o]),len(plan_dates))
#+pulp.lpSum([mp[i]*x[i][(i,j,t)]*(t-g[i][j]) for i in modelList for j in Md[i] for t in T[i][j] if t>g[i][j]])+
#pulp.lpSum([h[i][(i,t)]*t for i in modelList for t in Tm[i]])
prob += pulp.lpSum([x[i][(i,j,t)]*(t-g[i][j]) for i in modelList for j in Md[i] for t in T[i][j] if t>g[i][j]])

#pulp.lpSum([orderPool['priority'][o]*orderPool['order_type'][o]*\
                  #Csums[l][o] for o in orderList for l in prod_line]) pulp.lpSum([x[i][(i,j,t)]*(t-g[i][j]) for i in modelList for j in Md[i] for t in T[i][j] if t>g[i][j]])

#prob+=Cmax+eps*lpSum([r[j] for j in order_line])-eps*[compD[j] for j in order_line]
#The constraints
#1. every order has to be completed before due date
#2. relationships between release date and due date
#3. release date>=max(0,epst-plan_dates[0]) 
#4. fixed sum across days and lines equals the total number of orders
for i in modelList:
    #prob+=pulp.lpSum([LS[i][i][l] for l in prod_line])>=1
    #prob+=pulp.lpSum([k[i][(i,l,m)] for l in prod_line for m in P[i][l][0]])==pulp.lpSum([LS[i][(i,j,l)] for j in Md[i] for l in prod_line])
    # cap the total quantity split across lines
    #prob+=pulp.lpSum([k[i][(i,l,m)]*P[i][l][1][m] for l in model_line[model_line['model_no']==i]['line_no'] for m in P[i][l][0]])==modelSum[i]
    pass  # all candidate constraints above are left commented out
Example #39
model_icecream = pulp.LpProblem("Icecream", pulp.LpMaximize)

## Two variables: Cones & Sorbets

icecream_names = ['Cones', 'Sorbets']

## Declare variables

icecream_types = pulp.LpVariable.dicts("Icecream type",
                                       icecream_names,
                                       lowBound=0,
                                       cat='Continuous')

## Maximize profit

model_icecream += pulp.lpSum(3 * icecream_types['Cones'] +
                             2 * icecream_types['Sorbets'])

## Constraints

MaxSupply = {'Cones': 10, 'Sorbets': 15}

## MaxSupply

for i in icecream_types:
    model_icecream += icecream_types[i] <= MaxSupply[i]

print(icecream_types['Cones'])

print(model_icecream)
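The snippet above builds and prints the model but stops short of optimizing it; a hedged completion, assuming PuLP's default CBC solver is available:

model_icecream.solve()
for name in ['Cones', 'Sorbets']:
    print(name, icecream_types[name].varValue)  # 10.0 and 15.0, both at the supply caps
print('Profit:', pulp.value(model_icecream.objective))  # 60.0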

Example #40
def multiple_pairs(dist_matrices,
                   voi_ind_to_S_sets,
                   threshold=None,
                   api='py-mip',
                   solver='pulp',
                   variable_num_tol=0.001,
                   max_seconds=np.inf):
    """
    Doc string to be written.
    """
    voi_indices = list(voi_ind_to_S_sets.keys())
    n_voi = len(voi_indices)
    S_sets = list(voi_ind_to_S_sets.values())

    M = np.max(np.max(abs(dist_matrices), axis=0))

    a, b, c = dist_matrices.shape

    if b == c:
        J = a
        n = b
        dist_matrices = dist_matrices.transpose((1, 2, 0))[voi_indices]
    else:
        n = b
        J = c

    voi_to_Q_indices = {
        voi: np.array([
            int(i) for i in np.concatenate((range(0, voi), range(voi + 1, n)))
            if i not in voi_ind_to_S_sets[voi]
        ])
        for voi in voi_indices
    }

    if variable_num_tol is not None:
        up_bound = int(np.ceil(1 / variable_num_tol))
        variable_num_tol = 1 / up_bound
        cat = 'Integer'
    else:
        up_bound = 1
        cat = 'Continuous'

    if threshold is not None:
        if b == c:
            raise ValueError('dist_matrices not shaped correctly')
        temp_dist_matrices = np.zeros((n_voi, n, J))
        for i, voi in enumerate(voi_indices):
            temp_ranks = evaluate_best_vertices(dist_matrices[i], np.arange(J),
                                                S_sets[i])
            temp_dist_matrices[i] = edit_dist_matrices(dist_matrices[i],
                                                       S_sets[i], temp_ranks,
                                                       threshold)

    if api == 'pulp':
        model = pulp.LpProblem(sense=pulp.LpMinimize)

        inds = pulp.LpVariable.dicts("indicators for elements not in S",
                                     ('voi' + str(voi) + str(q)
                                      for voi in voi_indices
                                      for q in voi_to_Q_indices[voi]),
                                     cat='Integer',
                                     upBound=1,
                                     lowBound=0)

        weights = pulp.LpVariable.dicts("weights for representations",
                                        (j for j in range(J)),
                                        cat=cat,
                                        upBound=up_bound,
                                        lowBound=0)

        model += (pulp.lpSum([
            inds['voi' + str(voi) + '_' + str(q)] for voi in voi_indices
            for q in voi_to_Q_indices[voi]
        ]))

        model += (pulp.lpSum([weights[(j)] for j in range(J)]) == up_bound)

        for i, voi in enumerate(voi_indices):
            dist_matrix = dist_matrices[i, :, :]
            for s in voi_ind_to_S_sets[voi]:
                for q in voi_to_Q_indices[voi]:
                    model += (pulp.lpSum([
                        weights[j] * dist_matrix[s, j] for j in range(J)
                    ]) <= pulp.lpSum([
                        weights[j] * dist_matrix[q, j] for j in range(J)
                    ]) + inds['voi' + str(voi) + '_' + str(q)] * M * up_bound)
        try:
            if solver == 'pulp':
                model.solve()
            elif solver == 'coin_cmd':
                model.solve(solver=pulp.COIN_CMD())

            alpha_hat = np.array([w.varValue for w in weights.values()])
        except Exception as e:
            print(e)
            return None
    elif api == 'py-mip':
        model = mip.Model(sense=mip.MINIMIZE)

        # Need to fix inds
        inds = [[
            model.add_var(name='inds', var_type=mip.BINARY)
            for q in voi_to_Q_indices[voi]
        ] for voi in voi_indices]

        #         if variable_num_tol is None:
        weights = [
            model.add_var(name='weights', lb=0.0, ub=up_bound, var_type=cat)
            for j in range(J)
        ]
        model += mip.xsum(w for w in weights) == up_bound
        #         else:
        #             weights = [model.add_var(name='weights', lb=0, ub=up_bound, var_type='I') for j in range(J)]
        #             model += mip.xsum(w for w in weights) == up_bound

        for i, voi in enumerate(voi_indices):
            dist_matrix = dist_matrices[i, :, :]
            for s in voi_ind_to_S_sets[voi]:
                for k, q in enumerate(voi_to_Q_indices[voi]):
                    model += mip.xsum(
                        weights[j] * dist_matrix[s, j]
                        for j in range(J)) <= mip.xsum(
                            weights[j] * dist_matrix[q, j]
                            for j in range(J)) + inds[i][k] * M * up_bound

        model.objective = mip.xsum(mip.xsum(i for i in ind) for ind in inds)
        model.optimize(max_seconds=max_seconds)

        alpha_hat = np.array([w.x for w in weights])

    if alpha_hat[0] is None:
        return None
    else:
        return alpha_hat / np.sum(alpha_hat)
Example #41
def combine_representations(dist_matrix,
                            voi_index,
                            S_indices,
                            return_new_dists=True,
                            threshold=None,
                            solver='coin_cmd',
                            api='py-mip',
                            variable_num_tol=0.001,
                            max_seconds=np.inf):
    """
    A function to find the weights of optimal linear combination of representations.
    
    Input
    dist_matrix - np.array (shape=(n, J))
        Array containing the distances between the vertex of interest and the other n - 1
        vertices.
    voi_index - int
        Index of vertex of interest.
    S_indices - array-like
        Indices of the vertices that should be at the top of the
        nomination list for the vertex of interest.
    return_new_dists - bool
        If true, returns both the weights and the corresponding distance matrix.
    threshold - maximum value of the rank of an element of S_indices.
    solver - solver to use. in {'pulp', 'coin_cmd'}
    api - api to use. in {'gurobi', 'py-mip', 'pulp'}
    variable_num_tol - float in (0, 1]. resolution of the approximation of the continuous weights.
        If None, continuous weights are found.
    max_seconds - float in (0, inf)
        
    Return
    weights - np.array (length=J)
        Array containing the coefficients for the optimal distance function.
    -- optional -- new_dists - np.array (length=n)
        Array containing the distances after applying the learned weight vector. 
    """

    # Grab the shape of the data that was passed in
    n, J = dist_matrix.shape

    # Pre-process the data so that there are no elements of S_indices that are above threshold
    if threshold is not None:
        ranks = evaluate_best_vertices(dist_matrix,
                                       vertices=np.arange(J),
                                       s_star=S_indices)
        dist_matrix = edit_dist_matrices(dist_matrix, S_indices, ranks,
                                         threshold)

    # Grab the maximum value of dist_matrix (to be used as an upper bound later)
    M = np.max(abs(dist_matrix))

    # Grab the number of elements known to be similar to voi_index
    S = len(S_indices)

    # Define an array of integers corresponding to elements not in S_indices
    Q_indices = np.array([
        int(i)
        for i in np.concatenate((range(0, voi_index), range(voi_index + 1, n)))
        if i not in S_indices
    ])

    # Grab the number of elements not known to be similar to voi_index
    Q = len(Q_indices)

    # We can either use continuous weights for the representations or use discrete approximation
    if variable_num_tol is not None:
        # Here, we are using a discrete approximation

        # variable_num_tol is in the interval (0, 1]. If variable_num_tol is close to 0 that means
        # we want our approximation to be of a high resolution. To achieve this, we define 1 / variable_num_tol
        # to be the maximum value that a particular weight can take on.
        # i.e. with variable_num_tol = 0.1, the weights can take on values 0.0, 0.1, 0.2, 0.3, .., 1.
        # We normalize later.
        up_bound = int(np.ceil(1 / variable_num_tol))
        variable_num_tol = 1 / up_bound
        cat = 'Integer'
    else:
        # Here, we let the weights be continuous
        up_bound = 1
        cat = 'Continuous'

    # We've implemented the ILP in 3 different APIs: pulp, py-mip and gurobi.
    # Gurobi seems to be the industry standard but licensing is a bit prohibitive.

    # Each API is relatively similar. First, you define a set of variables.
    # Then, using these variables, you define an objective function and a set of constraints that you assign to a model object.

    if api == 'pulp':
        # Define a model.
        model = pulp.LpProblem(sense=pulp.LpMinimize)

        # Define indicator variables (as defined in section 1.2 of https://arxiv.org/pdf/2005.10700.pdf) for every vertex
        # That is not in S_indices.
        # NB: i am more than happy to walk through the paper if that would be helpful!
        inds = pulp.LpVariable.dicts("indicators for elements not in S",
                                     (q for q in Q_indices),
                                     cat='Integer',
                                     upBound=1,
                                     lowBound=0)

        # Define non-negative weight variables for each of the representations.
        weights = pulp.LpVariable.dicts("weights for representations",
                                        (j for j in range(J)),
                                        cat=cat,
                                        upBound=up_bound,
                                        lowBound=0)

        # Set the objective function.
        model += (pulp.lpSum([inds[(q)] for q in Q_indices]))

        # Add constraint that the weights must sum to the upper bound defined by variable_num_tol.
        model += (pulp.lpSum([weights[(j)] for j in range(J)]) == up_bound)

        # Add constraint that elements of S_indices should be closer than elements not in S_indices (or, the Q_indices)
        for s in S_indices:
            for q in Q_indices:
                model += (pulp.lpSum(
                    [weights[(j)] * dist_matrix[s, j]
                     for j in range(J)]) <= pulp.lpSum(
                         [weights[(j)] * dist_matrix[q, j]
                          for j in range(J)]) +
                          pulp.lpSum(inds[(q)] * M * up_bound))

        # Different solvers for api == 'pulp'
        try:
            if solver == 'pulp':
                model.solve()
            elif solver == 'coin_cmd':
                model.solve(solver=pulp.COIN_CMD())

            alpha_hat = np.array([w.varValue for w in weights.values()])
        except Exception as e:
            print(e)
            return None

    elif api == 'gurobi':
        model = gp.Model()

        model.setParam('OutputFlag', 0)

        ind = model.addVars(Q, vtype=GRB.BINARY, name='ind')
        model.setObjective(gp.quicksum(ind), GRB.MINIMIZE)

        w = model.addVars(J, lb=0, ub=1, vtype=GRB.CONTINUOUS, name='w')
        model.addConstr(w.sum() == 1)

        for s in S_indices:
            temp_s = gp.tupledict([((i), dist_matrix[s, i]) for i in range(J)])
            for i, q in enumerate(Q_indices):
                temp_q = gp.tupledict([((i), dist_matrix[q, i])
                                       for i in range(J)])
                model.addConstr(w.prod(temp_s) <= w.prod(temp_q) + ind[i] * M)

        model.optimize()
        alpha_hat = np.array([i.X for i in list(w.values())])

    elif api == 'py-mip':
        model = mip.Model(sense=mip.MINIMIZE)

        inds = [
            model.add_var(name='inds', var_type=mip.BINARY) for q in range(Q)
        ]

        #         if variable_num_tol is None:
        weights = [
            model.add_var(name='weights', lb=0.0, ub=up_bound, var_type=cat)
            for j in range(J)
        ]
        model += mip.xsum(w for w in weights) == up_bound
        #         else:
        #             weights = [model.add_var(name='weights', lb=0, ub=up_bound, var_type='I') for j in range(J)]
        #             model += mip.xsum(w for w in weights) == up_bound

        for s in S_indices:
            for i, q in enumerate(Q_indices):
                model += mip.xsum(
                    weights[j] * dist_matrix[s, j]
                    for j in range(J)) <= mip.xsum(
                        weights[j] * dist_matrix[q, j]
                        for j in range(J)) + inds[i] * M * up_bound

        model.objective = mip.xsum(ind for ind in inds)
        model.optimize(max_seconds=max_seconds)

        alpha_hat = np.array([w.x for w in weights])
    else:
        raise ValueError("api %s not implemented" % (api))

    # Return the new distances if told to do so.
    if return_new_dists:
        if np.sum(alpha_hat == 0) == J:
            return alpha_hat, dist_matrix
        else:
            return alpha_hat, np.average(dist_matrix,
                                         axis=1,
                                         weights=alpha_hat)

    if alpha_hat[0] is None:
        return None
    else:
        # Normalize
        return alpha_hat / np.sum(alpha_hat)
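A hypothetical usage sketch on toy data (two representations, four vertices; assumes PuLP's default solver via api='pulp', solver='pulp'):

import numpy as np

# Representation 0 places the known-similar vertex 1 closest to the vertex of
# interest 0; representation 1 does not.
dist_matrix = np.array([[0.0, 0.0],
                        [0.1, 0.9],   # vertex 1: known similar (S_indices)
                        [0.5, 0.2],
                        [0.6, 0.1]])
weights = combine_representations(dist_matrix, voi_index=0, S_indices=[1],
                                  return_new_dists=False,
                                  api='pulp', solver='pulp',
                                  variable_num_tol=0.1)
print(weights)  # optimal weights are not unique, but most weight lands on representation 0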
Example #42
def get_all_tautomers(
    molecule: 'Molecule',
    total_number_hydrogens: Optional[int] = None,
    net_charge: Optional[int] = None,
    enforce_octet_rule: bool = True,
    allow_radicals: bool = False,
    max_tautomers: Optional[int] = 2000,
    disallow_triple_bond_in_small_rings: bool = True,
    disallow_allenes_in_small_rings: bool = True,
    disallow_allenes_completely: bool = True,
    lock_phenyl_rings: bool = True,
    use_gurobi: bool = False,
    maximum_number_hydrogens_per_atom: Optional[int] = 3,
    skeletal_rearrangements: Optional[List[Tuple[int, int]]] = None,
    debug: Optional[TextIO] = None,
) -> List['Molecule']:
    '''
    Args:
        ``disallow_triple_bond_in_small_rings``: Disallow triple bonds in small rings (rings with size <= ``SMALL_RING``).
        ``disallow_allenes_in_small_rings``: Disallow allenes (=C=) in small rings (rings with size <= ``SMALL_RING``).
        ``lock_phenyl_rings``: Prevent phenyl rings (6-membered rings of sp2 carbons) from being hydrogenised.
        ``disallow_allenes_completely``: Disallow allenes (=C=) completely.
        ``maximum_number_hydrogens_per_atom``: Maximum number of hydrogen atoms carried by a single heavy atom. Single atom molecules will be allowed ``maximum_number_hydrogens_per_atom + 1`` hydrogens.
        ``skeletal_rearrangements``: Optional list of bonds (tuple of two atom indices) that can potentially be formed or destroyed during the tautomerisation process.

    Returns:
        A list of tautomers (Molecule).
    '''
    print_if_debug = lambda *args: write_to_debug(debug, *args)

    try:
        optional_bonds = set() if skeletal_rearrangements is None else {
            frozenset([atom_1, atom_2])
            for (atom_1, atom_2) in skeletal_rearrangements
        }
    except Exception:
        raise TypeError(
            'Invalid skeletal_rearrangements: {0}. Example: [(1, 2), (1, 3)]'.
            format(skeletal_rearrangements))

    molecule.bonds |= optional_bonds

    if len([1 for atom in molecule.atoms.values() if atom.element == 'H']) > 0:
        molecule.remove_all_hydrogens(mark_all_uncapped=True)

    neighbour_counts = molecule.neighbours_for_atoms()

    def keep_capping_strategy_for_atom(capping_strategy: Capping_Strategy,
                                       atom: Atom) -> bool:
        if atom.valence is not None:
            if False:
                if neighbour_counts[
                        atom.index] + new_atom_for_capping_strategy(
                            capping_strategy) == atom.valence:
                    print_if_debug(atom, capping_strategy)
            return neighbour_counts[
                atom.index] + new_atom_for_capping_strategy(
                    capping_strategy) == atom.valence
        else:
            return min_valence_for(atom) <= neighbour_counts[
                atom.index] + new_atom_for_capping_strategy(
                    capping_strategy) <= max_valence_for(atom)

    assert maximum_number_hydrogens_per_atom >= 0, 'Maximum number of hydrogens should be greater than or equal to 0'

    def possible_capping_strategies_for_atom(
            atom: Atom,
            is_phenyl_atom: bool = False) -> List[Capping_Strategy]:
        if is_phenyl_atom and lock_phenyl_rings:
            return [NO_CAP, H_CAP]
        else:
            return ([NO_CAP] + [
                merge_caps(*[H_CAP] * i) for i in range(
                    1, maximum_number_hydrogens_per_atom + 1 +
                    (1 if len(molecule.atoms) == 1 else 0))
            ])

    atoms_need_capping = [
        atom for atom in molecule.sorted_atoms() if not atom.capped
    ]

    print_if_debug([
        possible_capping_strategies_for_atom(
            atom, is_phenyl_atom=(atom.index in molecule.phenyl_atoms))
        for atom in atoms_need_capping
    ], )

    problem = LpProblem(
        "Tautomer enumeration problem for molecule {0}".format(molecule.name),
        LpMinimize)

    ELECTRON_MULTIPLIER = (2 if not allow_radicals else 1)

    non_allene_atoms = {}
    for atom in molecule.atoms.values():
        if disallow_allenes_completely:
            atom_bonds = [
                bond for bond in molecule.bonds if atom.index in bond
            ]
            if atom.element == 'C' and len(atom_bonds) == 2:
                non_allene_atoms[atom] = atom_bonds

    fragment_switches, fragment_scores, fragment_H_scores = {}, {}, {}
    capping_atoms_for = {}
    new_bonds_sets = {}
    for uncapped_atom in atoms_need_capping:
        possible_capping_strategies = possible_capping_strategies_for_atom(
            uncapped_atom,
            is_phenyl_atom=(uncapped_atom.index in molecule.phenyl_atoms))
        if len(possible_capping_strategies) == 0 or len(
                possible_capping_strategies
        ) == 1 and possible_capping_strategies[0] == NO_CAP:
            pass
        else:
            for (i, capping_strategy) in enumerate(
                    sorted(possible_capping_strategies), start=1):
                print_if_debug(uncapped_atom, capping_strategy, i)
                # Add switch variable
                fragment_switches[uncapped_atom.index, i] = LpVariable(
                    'F_{i},{j}'.format(i=uncapped_atom.index, j=i),
                    0,
                    1,
                    LpBinary,
                )

                new_atoms, new_bonds = molecule.extend_molecule_with(
                    uncapped_atom, capping_strategy)
                print_if_debug(i, [atom for atom in new_atoms])
                capping_atoms_for[uncapped_atom.index, i] = new_atoms
                new_bonds_sets[uncapped_atom.index, i] = [
                    bond for bond in new_bonds if uncapped_atom.index in bond
                ]
                fragment_scores[uncapped_atom.index,
                                i] = len(capping_atoms_for[uncapped_atom.index,
                                                           i])
                fragment_H_scores[uncapped_atom.index, i] = len([
                    atom for atom in capping_atoms_for[uncapped_atom.index, i]
                    if atom.element == 'H'
                ])

            # Only choose one capping strategy at a time
            problem += (lpSum(F_i for ((atom_id, _),
                                       F_i) in fragment_switches.items()
                              if atom_id == uncapped_atom.index) == 1,
                        'Single capping strategy for atom {atom_desc}'.format(
                            atom_desc=atom_short_desc(uncapped_atom)))

    all_capping_atoms = {
        atom
        for atoms in capping_atoms_for.values() for atom in atoms
    }
    all_capping_atom_ids = {atom.index for atom in all_capping_atoms}
    non_capping_atoms = [
        atom for atom in molecule.atoms.values()
        if atom.index not in all_capping_atom_ids
    ]
    fragment_switch_for_bond = {
        bond: fragment_switch
        for ((uncapped_atom_id, fragment_id),
             fragment_switch) in fragment_switches.items()
        for bond in new_bonds_sets[uncapped_atom_id, fragment_id]
    }
    by_switch_name = lambda T: T[1].name
    get_bond = lambda T: T[0]
    bonds_for_fragment_switch = {
        key: [get_bond(T) for T in group]
        for (key, group) in chain(
            groupby(
                sorted(
                    fragment_switch_for_bond.items(),
                    key=by_switch_name,
                ),
                key=by_switch_name,
            ),
            # Switches with no bonds, which are being "ignored" by groupby
            [(fragment_switches[uncapped_atom_id, i].name, [])
             for ((uncapped_atom_id, i), bonds) in new_bonds_sets.items()
             if len(bonds) == 0])
    }

    non_capping_bonds = {
        bond
        for bond in molecule.bonds if len(bond & all_capping_atom_ids) == 0
    }

    if True:
        molecule.write_graph('debug')

    charges = {
        atom.index:
        LpVariable("C_{i}".format(i=atom.index), -MAX_ABSOLUTE_CHARGE,
                   MAX_ABSOLUTE_CHARGE, LpInteger)
        for atom in non_capping_atoms
    }
    original_charges = list(charges.values())

    # Extra variables used to bound the absolute values of the charges
    absolute_charges = {
        atom_id: LpVariable("Z_{i}".format(i=atom_id), MIN_ABSOLUTE_CHARGE,
                            MAX_ABSOLUTE_CHARGE, LpInteger)
        for atom_id in charges.keys()
    }

    non_bonded_electrons = {
        atom_id:
        LpVariable("N_{i}".format(i=atom_id), 0,
                   MAX_NONBONDED_ELECTRONS // ELECTRON_MULTIPLIER, LpInteger)
        for (atom_id, atom) in molecule.atoms.items()
    }

    # Maps a bond to an integer
    bond_mapping = {bond: i for (i, bond) in enumerate(molecule.bonds)}

    # Maps an integer to a bond
    bond_reverse_mapping = {v: k for (k, v) in bond_mapping.items()}
    bond_key = lambda bond: ','.join(map(str, sorted(bond)))

    if disallow_triple_bond_in_small_rings:
        print_if_debug(
            'Note: Excluding triple bonds in small rings (<= {0})'.format(
                SMALL_RING))
        bonds_in_small_rings = bonds_in_small_rings_for(molecule)
    else:
        bonds_in_small_rings = set()

    bond_orders = {
        bond: LpVariable(
            "B_{bond_key}".format(bond_key=bond_key(bond)),
            MIN_BOND_ORDER if bond not in optional_bonds else 0,
            MAX_BOND_ORDER if bond not in bonds_in_small_rings else 2,
            LpInteger,
        ) if bond not in fragment_switch_for_bond else
        fragment_switch_for_bond[bond]
        for bond in molecule.bonds
    }

    OBJECTIVES = [
        MIN(lpSum(absolute_charges.values())),
    ]

    H_size = lpSum([
        F_i * fragment_H_scores[uncapped_atom_id, i]
        for ((uncapped_atom_id, i), F_i) in fragment_switches.items()
    ])

    if total_number_hydrogens is not None:
        problem += H_size == total_number_hydrogens, 'Total number of hydrogens={0}'.format(
            total_number_hydrogens)

    total_size_objective = MIN(
        lpSum([
            F_i * fragment_scores[uncapped_atom_id, i]
            for ((uncapped_atom_id, i), F_i) in fragment_switches.items()
        ]))
    if sum([
            fragment_scores[uncapped_atom_id, i]
            for ((uncapped_atom_id, i), F_i) in fragment_switches.items()
    ]) != 0:
        OBJECTIVES.append(total_size_objective)

    OBJECTIVES.extend([
        MIN(
            lpSum([
                charge * ELECTRONEGATIVITIES[molecule.atoms[atom_id].element]
                for (atom_id, charge) in charges.items()
            ])),
        MIN(
            lpSum([
                bond_order *
                ELECTRONEGATIVITIES[molecule.atoms[atom_id].element]
                for (bond, bond_order) in bond_orders.items()
                for atom_id in bond
            ])),
    ])

    if net_charge is not None:
        problem += (lpSum(charges.values()) == net_charge,
                    'Known net charge={0}'.format(net_charge))

    for atom in non_capping_atoms:
        problem += (
            charges[atom.index] == VALENCE_ELECTRONS[atom.element] - lpSum([
                bond_orders[bond]
                for bond in molecule.bonds if atom.index in bond
            ]) - ELECTRON_MULTIPLIER * non_bonded_electrons[atom.index],
            'Electron balance for atom {element}_{index}'.format(
                element=atom.element, index=atom.index),
        )

    # Deal with absolute values
    for atom in non_capping_atoms:
        problem += charges[atom.index] <= absolute_charges[
            atom.index], 'Absolute charge constraint left {i}'.format(
                i=atom.index)
        problem += -charges[atom.index] <= absolute_charges[
            atom.index], 'Absolute charge constraint right {i}'.format(
                i=atom.index)

        if enforce_octet_rule and atom not in all_capping_atoms:
            if atom.element not in {'B', 'BE', 'P', 'S'}:
                problem += (
                    ELECTRONS_PER_BOND * lpSum([
                        bond_orders[bond]
                        for bond in molecule.bonds if atom.index in bond
                    ]) +
                    ELECTRON_MULTIPLIER * non_bonded_electrons[atom.index] == (
                        2 if atom.element in {'H', 'HE'} else 8),
                    'Octet for atom {element}_{index}'.format(
                        element=atom.element, index=atom.index),
                )

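    # Forbid allenes (=C=) outright: for a carbon with exactly two bonds, the
    # order pair (2, 2) violates the two inequalities below for both values of
    # the switch A, while any other chemically sensible pair admits a valid A.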
    for (atom, (bond_1, bond_2)) in non_allene_atoms.items():
        new_allene_switch = LpVariable('A_{i}'.format(i=atom.index), 0, 1,
                                       LpBinary)
        problem += 2 * bond_orders[bond_1] - bond_orders[
            bond_2] + 4 * new_allene_switch >= 3
        problem += 2 * bond_orders[bond_1] - bond_orders[
            bond_2] + 4 * new_allene_switch <= 5

    for atom in atoms_in_small_rings_for(molecule):
        if disallow_allenes_in_small_rings:
            if atom.element in {'C', 'N'}:
                adjacent_non_hydrogen_bonds = [
                    bond for bond in non_capping_bonds if atom.index in bond
                ]
                if len(adjacent_non_hydrogen_bonds) == 2:
                    problem += sum(
                        bond_orders[bond]
                        for bond in adjacent_non_hydrogen_bonds
                    ) <= 3, 'No allenes for atom {atom_desc} in short ring'.format(
                        atom_desc=atom_short_desc(atom))

    def debug_failed_ILP(n: Optional[int] = None) -> None:
        debug_filename = 'debug{0}.lp'.format(
            '' if n is None else '_{n}'.format(n=n))
        problem.writeLP(debug_filename)
        print('Failed LP written to "{0}"'.format(debug_filename))

    def encode_solution() -> int:
        # bitshift is faster than multiplication by 2**i
        return sum(
            int(v.varValue) << i
            for i, v in enumerate(fragment_switches.values()))

    def new_molecule_for_current_solution(
            n: Optional[int] = None) -> 'Molecule':
        new_molecule = deepcopy(molecule)
        if n is not None:
            new_molecule.name += '_tautomer_{0}'.format(n)

        DELETE_FAILED_CAPS = True

        new_molecule.formal_charges, new_molecule.bond_orders, new_molecule.non_bonded_electrons = {}, {}, {}
        atoms_to_remove = set()
        for v in problem.variables():
            variable_type, variable_substr = v.name.split('_')
            if variable_type == 'C':
                atom_index = int(variable_substr)
                new_molecule.formal_charges[atom_index] = MUST_BE_INT(
                    v.varValue)
            elif variable_type == 'B':
                if False:
                    bond_index = int(variable_substr)
                    new_molecule.bond_orders[
                        bond_reverse_mapping[bond_index]] = MUST_BE_INT(
                            v.varValue)
                else:
                    bond, bond_order = frozenset(
                        map(int, variable_substr.split(','))), MUST_BE_INT(
                            v.varValue)
                    if bond in optional_bonds:
                        if bond_order > 0:
                            print_if_debug('new_bond', bond, bond_order, [
                                new_molecule.atoms[atom_index]
                                for atom_index in bond
                            ])
                            new_molecule.bond_orders[bond] = bond_order
                        else:
                            print_if_debug('no_new_bond', bond, bond_order, [
                                new_molecule.atoms[atom_index]
                                for atom_index in bond
                            ])
                            new_molecule.bonds.remove(bond)
                    else:
                        if bond_order == 0:
                            print_if_debug('removing_bond', bond, bond_order, [
                                new_molecule.atoms[atom_index]
                                for atom_index in bond
                            ])
                            new_molecule.bonds.remove(bond)
                        else:
                            new_molecule.bond_orders[bond] = bond_order
            elif variable_type == 'Z':
                pass
            elif variable_type == 'A':
                pass
            elif variable_type == 'N':
                atom_index = int(variable_substr)
                new_molecule.non_bonded_electrons[atom_index] = MUST_BE_INT(
                    v.varValue) * ELECTRON_MULTIPLIER
            elif variable_type == 'F':
                uncapped_atom_id, capping_strategy_id = map(
                    int, variable_substr.split(','))
                if MUST_BE_INT(v.varValue) == 0 and DELETE_FAILED_CAPS:
                    atoms_to_remove.add(
                        (uncapped_atom_id, capping_strategy_id))
                for bond in bonds_for_fragment_switch[v.name]:
                    new_molecule.bond_orders[bond] = MUST_BE_INT(v.varValue)
            elif variable_type == 'S':
                capping_atom_id = int(variable_substr)
            else:
                raise Exception(
                    'Unknown variable type: {0}'.format(variable_type))

        for atom_index in all_capping_atom_ids:
            # Manually add electronic properties of hydrogens: charge=0, non_bonded_electrons=0
            new_molecule.formal_charges[atom_index] = 0
            new_molecule.non_bonded_electrons[atom_index] = 0

        if DELETE_FAILED_CAPS:
            for (uncapped_atom_id, capping_strategy_id) in atoms_to_remove:
                new_molecule.remove_atoms(
                    atom for atom in capping_atoms_for[uncapped_atom_id,
                                                       capping_strategy_id])

        if not allow_radicals and False:
            assert all([
                nonbonded_electrons % 2 == 0 for nonbonded_electrons in
                new_molecule.non_bonded_electrons.values()
            ]), {
                new_molecule.atoms[atom_index]: electrons
                for (atom_index,
                     electrons) in new_molecule.non_bonded_electrons.items()
                if electrons % 2 == 1
            }

        new_molecule.update_valences()
        new_molecule.assign_aromatic_bonds()
        new_molecule.assert_molecule_coherence()
        return new_molecule

    # Solve once to find optimal solution with lowest encode_solution()
    try:
        problem.sequentialSolve(OBJECTIVES, timeout=ILP_SOLVER_TIMEOUT)
        assert problem.status == 1, (molecule.name, LpStatus[problem.status])
    except Exception as e:
        debug_failed_ILP(0)
        raise

    if debug is not None:
        debug_failed_ILP(0)

    all_tautomers = [new_molecule_for_current_solution(n=0)]

    # Remove redundant constraints from the previous multi-objective optimisation
    for constraint_name in [
            '1_Sequence_Objective', '2_Sequence_Objective',
            '3_Sequence_Objective'
    ]:
        del problem.constraints[constraint_name]

    solutions = []
    # Iterate until no more tautomers are found
    for n in (count(1) if max_tautomers is None else range(1, max_tautomers)):
        # exclude current optimal solution F*: \sum_{i}|F_i - F*_i| >= 1
        problem += \
            sum(v for v in fragment_switches.values() if not v.varValue) + \
            sum(1 - v for v in fragment_switches.values() if v.varValue) >= 1,\
            'Solution {n}'.format(n=n)

        s = encode_solution()
        print_if_debug('Excluding solution {0}'.format(s))
        solutions.append(s)

        try:
            problem.solve(
                solver=GUROBI_CMD() if use_gurobi else None,
                timeout=ILP_SOLVER_TIMEOUT,
            )
        except Exception as e:
            debug_failed_ILP(n)
            raise

        if problem.status == 1:
            pass
        else:
            print_if_debug(problem.status, LpStatus[problem.status])
            if debug is not None:
                debug_failed_ILP(n)
            break

        all_tautomers.append(new_molecule_for_current_solution(n=n))

    # check that all solutions are unique
    assert len(solutions) == len(set(solutions))
    return unique_molecules(all_tautomers)
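The solution-exclusion constraint added in the loop above is a standard "no-good" cut on binaries: for an incumbent F*, the inequality sum_{i: F*_i = 0} F_i + sum_{i: F*_i = 1} (1 - F_i) >= 1 makes exactly that assignment infeasible, so every re-solve yields a previously unseen capping pattern. A minimal standalone sketch:

import pulp

prob = pulp.LpProblem('no_good_demo', pulp.LpMaximize)
x = [pulp.LpVariable('x%d' % i, cat='Binary') for i in range(3)]
prob += pulp.lpSum(x)  # maximize the number of ones -> (1, 1, 1)
prob.solve()
incumbent = [int(v.varValue) for v in x]

# Forbid the incumbent and re-solve; the next optimum flips at least one bit.
prob += (pulp.lpSum(v for v, s in zip(x, incumbent) if s == 0)
         + pulp.lpSum(1 - v for v, s in zip(x, incumbent) if s == 1)) >= 1
prob.solve()
print(incumbent, '->', [int(v.varValue) for v in x])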
Example #43
def optimizeTrajectory(N, T, T_end, V, P, W, M, m, TSFC, minApproachDist, x_ini, x_fin, x_lim, u_lim, objects, r):

  # --------------------
  # Calculated Variables
  # --------------------
  del_t = T_end/T
  numObjects = objects.shape[0]

  # ------------------
  # Decision variables
  # ------------------
  # Thrust
  u = pulp.LpVariable.dicts(
    "input", ((i, p, n) for i in range(T) for p in range(V) for n in range(N)),
    cat='Continuous')
  # Thrust Magnitude
  v = pulp.LpVariable.dicts(
    "inputMag", ((i, p, n) for i in range(T) for p in range(V) for n in range(N)),
    lowBound=0,
    cat='Continuous')
  # State
  x = pulp.LpVariable.dicts(
    "state", ((i, p, k) for i in range(T) for p in range(V) for k in range(2*N)),
    cat='Continuous')
  # Object collision
  a = pulp.LpVariable.dicts(
    "objCol", ((i, p, l, k) for i in range(T) for p in range(V) for l in range(numObjects) for k in range(2*N)),
    cat='Binary')
  # Satellite Collision Avoidance
  b = pulp.LpVariable.dicts(
    "satelliteCollisionAvoidance", ((i, p, q, k) for i in range(T) for p in range(V) for q in range(V) for k in range(2*N)),
    cat='Binary')
  # Plume Impingement
  c_plus = pulp.LpVariable.dicts(
    "plumeImpingementPositive", ((i, p, q, n, k) for i in range(T) for p in range(V) for q in range(V) for n in range(N) for k in range(2*N)),
    cat='Binary')
  c_minus = pulp.LpVariable.dicts(
    "plumeImpingementNegative", ((i, p, q, n, k) for i in range(T) for p in range(V) for q in range(V) for n in range(N) for k in range(2*N)),
    cat='Binary')

  # ------------------
  # Optimization Model
  # ------------------

  # Instantiate Model
  model = pulp.LpProblem("Satellite Fuel Minimization Problem", pulp.LpMinimize)
  # Objective Function
  model += pulp.lpSum(v[i, p, n] for i in range(T) for p in range(V) for n in range(N)), "Fuel Minimization"

  # -----------
  # Constraints
  # -----------

  # Basic Constraints
  # -----------------
  # Constrain thrust magnitude to abs(u[i, p, n])
  for i in range(T):
    for p in range(V):
      for n in range(N):
        model +=  u[i, p, n] <= v[i, p, n]
        model += -u[i, p, n] <= v[i, p, n]
  # State and Input vector start and end values
  for p in range(V):
    for k in range(2*N):
      model += x[0, p, k]   == x_ini[p, k]
      model += x[T-1, p, k] == x_fin[p, k]
  # Model Dynamics
  for constraint in freeSpaceDynamics(x, u, T, V, N, m, del_t):
    model += constraint
  # State and Input vector limits
  for i in range(T):
    for p in range(V):
      for n in range(N):
        model += u[i, p, n] <=  u_lim[p, n]
        model += u[i, p, n] >= -u_lim[p, n]
      for k in range(2*N): # Necessary?
        model += x[i, p, k] <=  x_lim[p, k]
        model += x[i, p, k] >= -x_lim[p, k]

  # Obstacle Avoidance
  # ------------------
  if objects.shape[1] > 0:
    for i in range(T):
      for p in range(V):
        for l in range(numObjects):
          model += pulp.lpSum(a[i, p, l, n] for n in range(2*N)) <= 2*N-1
          for n in range(N):
            model += x[i, p, n] >= objects[l, N+n] + minApproachDist - M*a[i, p, l, N+n]
            model += x[i, p, n] <= objects[l, n] - minApproachDist + M*a[i, p, l, n]

  # Collision Avoidance
  # -------------------
  if V > 1: # If more than one vehicle
      for i in range(T):
        for p in range(V):
          for q in range(V):
            if q > p:
              model += pulp.lpSum(b[i, p, q, k] for k in range(2*N)) <= 2*N-1
              for n in range(N):
                model += x[i, p, n] - x[i, q, n] >= r[n] - M*b[i, p, q, n]
                model += x[i, q, n] - x[i, p, n] >= r[n] - M*b[i, p, q, n+N]

  # Plume Impingement
  # -----------------
  # Positive thrust
  if V > 1: # If more than one vehicle
      for i in range(T):
        for p in range(V):
          for q in range(V):
            if q != p:
              for n in range(N):
                model += pulp.lpSum(c_plus[i, p, q, n, k] for k in range(2*N)) <= 2*N
                model += -u[i, p, n] >= - M*c_plus[i, p, q, n, 0]
                model += x[i, p, n] - x[i, q, n] >= P - M*c_plus[i, p, q, n, n]
                model += x[i, q, n] - x[i, p, n] >= - M*c_plus[i, p, q, n, n+N]
                for m in range(N):
                  if m != n:
                    model += x[i, p, m] - x[i, q, m] >= W - M*c_plus[i, p, q, n, m]
                    model += x[i, q, m] - x[i, p, m] >= W - M*c_plus[i, p, q, n, m+N]
  # Negative thrust
      for i in range(T):
        for p in range(V):
          for q in range(V):
            if q != p:
              for n in range(N):
                model += pulp.lpSum(c_minus[i, p, q, n, k] for k in range(2*N)) <= 2*N
                model += u[i, p, n] >= - M*c_minus[i, p, q, n, 0]
                model += x[i, p, n] - x[i, q, n] >= - M*c_minus[i, p, q, n, n]
                model += x[i, q, n] - x[i, p, n] >= P - M*c_minus[i, p, q, n, n+N]
                for m in range(N):
                  if m != n:
                    model += x[i, p, m] - x[i, q, m] >= W - M*c_minus[i, p, q, n, m]
                    model += x[i, q, m] - x[i, p, m] >= W - M*c_minus[i, p, q, n, m+N]


  # Plume Avoidance for Vehicles
  # ----------------------------

  # Plume Avoidance for Obstacles
  # -----------------------------

  # Final Configuration Selection
  # -----------------------------

  # Solve model and return results in dictionary
  model.solve(pulp.CPLEX())

  # Create Pandas dataframe for results and return

  return {'model':model, 'x':x, 'u':u}
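The obstacle block above uses the usual big-M disjunction: a point avoids an axis-aligned box if it lies beyond at least one of its 2N faces, so each binary a[i, p, l, k] relaxes one face constraint and bounding their sum by 2N-1 keeps at least one face enforced. A minimal 2-D sketch (hypothetical unit box, margin 0.1 and M = 100):

import pulp

M, margin = 100, 0.1
prob = pulp.LpProblem('outside_box', pulp.LpMinimize)
x = pulp.LpVariable('x', lowBound=0)
y = pulp.LpVariable('y', lowBound=0)
a = [pulp.LpVariable('a%d' % k, cat='Binary') for k in range(4)]
prob += x + y  # objective: stay as close to the origin as possible

# Keep (x, y) at least `margin` away from the unit box; each binary relaxes one face.
prob += x >= 1 + margin - M * a[0]
prob += x <= 0 - margin + M * a[1]
prob += y >= 1 + margin - M * a[2]
prob += y <= 0 - margin + M * a[3]
prob += pulp.lpSum(a) <= 3  # at least one face constraint stays active

prob.solve()
print(x.varValue, y.varValue)  # e.g. (1.1, 0.0): pushed just past one face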
Example #44
scene_sizes = [len(scene_indices[scenes[i]]) for i in range(0, len(scenes))]

# Matrix on same format as decision variable, R[i, j] = 1 if actor j can play role i
R = {}
for i in range(0, m):
    for j in range(0, n):
        R[i, j] = int(actors[j] in possible_actors[roles[i]])

## State binary LP
# Decision variable
x = LpVariable.dicts('Rollefordeling', [(i, j) for i in range(0, m)
                                        for j in range(0, n)], 0, 1, LpBinary)
prob = LpProblem('RoleDistribution', LpMaximize)

# Maximize number of people who have a role
prob += lpSum(x[i, j] for i in range(0, m) for j in range(0, n))
# Only one role per person
for j in range(0, n):
    prob += lpSum(x[i, j] for i in range(0, m)) <= 1

# Only one person per role
for i in range(0, m):
    prob += lpSum(x[i, j] for j in range(0, n)) <= 1

# Each scene should either have all roles assigned, or none (meaning it is not included)
scene_included = LpVariable.dicts('SceneChoice', range(0, t), 0, 1, LpBinary)
for s in range(0, t):
    current_scene_indices = scene_indices[scenes[s]]
    prob += lpSum(x[i, j] for i in current_scene_indices
                  for j in range(0, n)) == scene_included[s] * scene_sizes[s]
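A hedged sketch of solving the model and reading the casting back out (assumes the data structures above and PuLP's default solver):

prob.solve()
print('Status:', LpStatus[prob.status])
for i in range(m):
    for j in range(n):
        if x[i, j].varValue is not None and x[i, j].varValue > 0.5:
            print(roles[i], '->', actors[j])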
Example #45
    def optimize(self, prices, forecasts=None, initial_charge=0, timestep='5min'):
        """Run the linear program to optimize the battery.

        prices         list [$/MWh]
        forecasts      list [$/MWh]
        initial_charge float [MWh]
        timestep       str   5min, 1hr etc
        """
        self.prob = LpProblem('cost minimization', LpMinimize)

        self.timestep = timestep
        timestep_timedelta = parse_timedelta(timestep)
        timestep_hours = timestep_timedelta.total_seconds() / (60*60)
        self.step = 1 / timestep_hours
        #  append a NaN onto the prices list to represent the price
        #  during the last reported period, which is only used to give the
        #  final charge, and not included in the optimization
        prices = list(prices)
        prices.append(None)

        if forecasts is None:
            forecasts = prices
        else:
            # If we're not inheriting the prices, we need to append to forecast
            # to match the price list.
            forecasts.append(None)

        forecast_len = len(forecasts)
        price_len = len(prices)
        len_msg = """
            The number of forecasts({}) should match the number of prices({}).
        """.format(forecast_len, price_len)
        assert forecast_len == price_len, len_msg

        assert initial_charge <= self.capacity
        assert initial_charge >= 0

        #  used to index timesteps
        idx = range(0, len(prices))

        self.vars = self.setup_vars(idx)

        imports = self.vars['imports']
        exports = self.vars['exports']
        charges = self.vars['charges']
        losses = self.vars['losses']

        #  the objective function we are minimizing
        self.prob += lpSum(
            [imports[i] * forecasts[i] for i in idx[:-1]] +
            [-(exports[i] - losses[i]) * forecasts[i] for i in idx[:-1]]
        )

        #  initial charge
        self.prob += charges[0] == initial_charge

        #  last item in the index isn't used because the last timestep only
        #  represents the final charge level - no import or export is done
        for i in idx[:-1]:
            #  energy balance across two time periods
            self.prob += charges[i + 1] == charges[i] + (imports[i] - exports[i]) / self.step

            #  constrain battery charge level
            self.prob += charges[i] <= self.capacity
            self.prob += charges[i] >= 0

            self.prob += losses[i] == exports[i] * (1 - self.efficiency)

        print('starting linear program for {}'.format(self))
        self.prob.solve()

        opt_results = {
            "name": "optimization_results",
            "status": LpStatus[self.prob.status]
        }

        print('linear program for {} done - {}'.format(self, opt_results['status']))

        logger.info(json.dumps(opt_results))

        self.info = self.generate_outputs(prices, forecasts, idx,
                                          initial_charge)

        return self.info
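The timestep bookkeeping above converts per-period power flows into energy: self.step is the number of periods per hour, so a flow of p MW sustained for one period changes the charge by p / step MWh. For example, assuming parse_timedelta returns a datetime.timedelta as its use above suggests:

from datetime import timedelta

timestep_hours = timedelta(minutes=5).total_seconds() / (60 * 60)
step = 1 / timestep_hours
print(step)        # 12.0 five-minute periods per hour
print(6.0 / step)  # 0.5 -> a 6 MW import over one period adds 0.5 MWh of charge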
Example #46
    def sign(agent_id: str, agreements: List[Contract],
             trust_probabilities: Dict[str, float], inventory: int):
        """
        Given a list of agreements and trust probabilities, each of type negmas.Contract, decides which agreements to sign.
        :param agent_id: the agent's id (self.id of the calling agent)
        :param agreements: a list of agreements, each of type negmas.Contracts.
        :param trust_probabilities: a dictionary mapping an agent's id to its trust probability
        :param inventory: current inventory of output units available to sell.
        :return: a dictionary with information about the solver. In particular, the dictionary contains an entry 'list_of_signatures',
         a list of the same length as the input list of agreements. The i-th element of 'list_of_signatures' is agent_id if the agent
         wants to sign the i-th agreement in the input list, and None otherwise.
        """
        # If the list of agreements is empty, then return an empty list of signatures.
        if len(agreements) == 0:
            return {
                'list_of_signatures': [],
                'agent_id': agent_id,
                'model': None,
                'time_to_generate_ilp': None,
                'time_to_solve_ilp': None,
                'agreements': agreements,
                'trust_probabilities': trust_probabilities,
                'profit': None
            }

        # Partition agreements into buy and sell agreements.
        agreements_to_buy_inputs, agreements_to_sell_outputs = SCMLContractsSigner.partition_agreements(
            agent_id, agreements, trust_probabilities)

        # If there are no sell contracts, the signer has nothing to do and signs nothing.
        if len(agreements_to_sell_outputs) == 0:
            return {
                'list_of_signatures': [None] * len(agreements),
                'agent_id': agent_id,
                'model': None,
                'time_to_generate_ilp': None,
                'time_to_solve_ilp': None,
                'agreements': agreements,
                'trust_probabilities': trust_probabilities,
                'profit': None
            }

        # For efficiency purposes, we order the agreements by delivery times. But, before we do, we must be able to
        # recover the indices of the agreements as given to the solver, otherwise, we can't map the output to the right agreements.
        buy_agreements = [
            agreement + (i, )
            for i, agreement in enumerate(agreements_to_buy_inputs)
        ]
        sell_agreements = [
            agreement + (i, )
            for i, agreement in enumerate(agreements_to_sell_outputs)
        ]

        # At this point an agreement is a tuple: (MASTER_INDEX, QUANTITY, TIME, PRICE, SUB_INDEX). Now we order by TIME.
        buy_agreements = sorted(buy_agreements,
                                key=lambda x: x[SCMLContractsSigner.TIME])
        sell_agreements = sorted(sell_agreements,
                                 key=lambda x: x[SCMLContractsSigner.TIME])

        # The code that follows will change the agreement lists, so we make a copy of them for later reference.
        buy_agreements_copy = buy_agreements.copy()
        sell_agreements_copy = sell_agreements.copy()

        t0 = time.time()
        # Decision variables
        buy_sign_vars = pulp.LpVariable.dicts(
            'buy_sign', (i for i, _ in enumerate(buy_agreements)),
            lowBound=0,
            upBound=1,
            cat='Integer')
        sell_sign_vars = pulp.LpVariable.dicts(
            'sell_sign', (i for i, _ in enumerate(sell_agreements)),
            lowBound=0,
            upBound=1,
            cat='Integer')

        # Generate the pulp problem.
        model = pulp.LpProblem('Contract_Signer_Solver', pulp.LpMaximize)

        # The objective function is profit, defined as revenue minus cost.
        model += pulp.lpSum([
            sell_agreements[i][SCMLContractsSigner.QUANTITY] *
            sell_agreements[i][SCMLContractsSigner.PRICE] *
            sell_agreements[i][SCMLContractsSigner.PARTNER_TRUST] *
            sell_sign_vars[s[SCMLContractsSigner.SUB_INDEX]]
            for i, s in enumerate(sell_agreements)
        ] + [
            -1.0 * buy_agreements[i][SCMLContractsSigner.QUANTITY] *
            buy_agreements[i][SCMLContractsSigner.PRICE] *
            buy_agreements[i][SCMLContractsSigner.PARTNER_TRUST] *
            buy_sign_vars[b[SCMLContractsSigner.SUB_INDEX]]
            for i, b in enumerate(buy_agreements)
        ])

        # Construct the constraints. They model inventory feasibility, i.e., we don't commit to a sale unless we have enough outputs.
        current_sell_time = sell_agreements[0][SCMLContractsSigner.TIME]
        current_sell_time_sum = []
        partial_sell_sum = []
        partial_buy_sum = []
        result = []
        while len(sell_agreements) > 0:
            s = sell_agreements.pop(0)
            if current_sell_time == s[SCMLContractsSigner.TIME]:
                current_sell_time_sum += [
                    sell_sign_vars[s[SCMLContractsSigner.SUB_INDEX]] *
                    s[SCMLContractsSigner.QUANTITY]
                ]
            else:
                partial_buy_sum += SCMLContractsSigner.constraints_generation_helper(
                    buy_agreements, buy_sign_vars, current_sell_time)
                result += [(current_sell_time_sum.copy(),
                            partial_buy_sum.copy(), partial_sell_sum.copy())]
                partial_sell_sum += current_sell_time_sum
                current_sell_time = s[SCMLContractsSigner.TIME]
                current_sell_time_sum = [
                    sell_sign_vars[s[SCMLContractsSigner.SUB_INDEX]] *
                    s[SCMLContractsSigner.QUANTITY]
                ]
        partial_buy_sum += SCMLContractsSigner.constraints_generation_helper(
            buy_agreements, buy_sign_vars, current_sell_time)
        result += [(current_sell_time_sum.copy(), partial_buy_sum.copy(),
                    partial_sell_sum.copy())]
        for left, middle, right in result:
            model += sum(left) <= inventory + sum(middle) - sum(right)

        # Measure the time taken to generate the ILP.
        time_to_generate_ilp = time.time() - t0

        # Solve the integer program and hide the output given by the solver.
        t0_solve = time.time()
        model.solve(pulp.PULP_CBC_CMD(msg=False))
        time_to_solve_ilp = time.time() - t0_solve

        # Record which contracts should be signed. We start by assuming no contracts will be signed.
        list_of_signatures = [None] * len(agreements)
        for agreement in buy_agreements_copy:
            if buy_sign_vars[agreement[
                    SCMLContractsSigner.
                    SUB_INDEX]].varValue is not None and int(
                        buy_sign_vars[agreement[
                            SCMLContractsSigner.SUB_INDEX]].varValue) == 1:
                list_of_signatures[agreement[
                    SCMLContractsSigner.MASTER_INDEX]] = agent_id

        for agreement in sell_agreements_copy:
            if sell_sign_vars[agreement[
                    SCMLContractsSigner.
                    SUB_INDEX]].varValue is not None and int(
                        sell_sign_vars[agreement[
                            SCMLContractsSigner.SUB_INDEX]].varValue) == 1:
                list_of_signatures[agreement[
                    SCMLContractsSigner.MASTER_INDEX]] = agent_id

        # Return multiple objects for inspection purposes. In production, we care about the list of sign contracts, 'list_of_signatures'.
        return {
            'list_of_signatures': list_of_signatures,
            'agent_id': agent_id,
            'model': model,
            'time_to_generate_ilp': time_to_generate_ilp,
            'time_to_solve_ilp': time_to_solve_ilp,
            'agreements': agreements,
            'trust_probabilities': trust_probabilities,
            'profit': pulp.value(model.objective)
        }
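A toy check of the constraint shape the while loop produces (hypothetical numbers): with starting inventory 2, a 3-unit buy delivered before a 4-unit sell, the constraint at the sell's delivery time reads 4*sell_sign <= 2 + 3*buy_sign, so signing the sell is only feasible if the buy is signed too.

inventory0, buy_qty, sell_qty = 2, 3, 4
for buy_sign in (0, 1):
    print(buy_sign, sell_qty * 1 <= inventory0 + buy_qty * buy_sign)  # 0 False, 1 True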
Example #47
File: task2.py  Project: elgator/linprog
# AG: we can live without this constraint if we minimize the objective function
# model += pulp.lpSum(xa) == pulp.lpSum(requirement_a.values())
# model += pulp.lpSum(xb) == pulp.lpSum(requirement_b)

# Constraint: positive inventory
inv_a = dict()
inv_b = dict()
for d in days:
    inv_b[d] = accumulate(xb, d) - accumulate(requirement_b, d)
    model += inv_b[d] >= 0, f"Pos.inv.B at day {d}"
    inv_a[d] = accumulate(xa, d) - accumulate(requirement_a, d)
    model += inv_a[d] >= 0, f"Pos.inv.A at day {d}"

# Target: min inventory
# AG: added requirement_b_to_a term as we need to scale the penalty for b by the proportion of a
model += pulp.lpSum(
    inv_a.values()) + pulp.lpSum(inv_b.values()) * (requirement_b_to_a + 1)

feasibility = model.solve()
print("Status:", pulp.LpStatus[feasibility])
if feasibility == 1:
    import pandas as pd

    df = pd.DataFrame(
        dict(
            sales_b=purchases_b,
            production_b=peek(xb),
            inventory_b=peek(inv_b),
            processing_a=values(requirement_a) - purchases_a,
            sales_a=purchases_a,
            requirement_a=values(requirement_a),
            production_a=peek(xa),
Example #48
    def __calcW(self, goal=1, eps=0.00):
        oidx = [i for i in range(self.M)]
        Nsols = len(self.solutionsList)
        # Create a PuLP model (solved with Gurobi below)
        prob = lp.LpProblem("max_mean", lp.LpMaximize)
        # prob = mip.Model(sense=mip.MAXIMIZE, solver_name=mip.GRB)

        # Creation of linear integer variables
        w = list(
            lp.LpVariable.dicts('w', oidx, lowBound=0,
                                cat='Continuous').values())
        # w = list(prob.add_var(name='w', var_type=mip.CONTINUOUS, lb=0) for i in range(len(Ingredients)))
        uR = list(lp.LpVariable.dicts('uR', oidx, cat='Continuous').values())
        # uR = list()
        kp = list(
            lp.LpVariable.dicts('kp', list(range(Nsols)),
                                cat='Continuous').values())
        # kp = list()
        nu = list(
            lp.LpVariable.dicts('nu', oidx, lowBound=0,
                                cat='Continuous').values())
        # nu = list()

        test = False
        if test:
            kpB = list(
                lp.LpVariable.dicts('kpB',
                                    list(range(Nsols)),
                                    lowBound=0,
                                    upBound=1,
                                    cat='Continuous').values())
            nuB = list(
                lp.LpVariable.dicts('nuB',
                                    oidx,
                                    lowBound=0,
                                    upBound=1,
                                    cat='Continuous').values())
        else:
            kpB = list(
                lp.LpVariable.dicts('kpB', list(range(Nsols)),
                                    cat='Binary').values())
            nuB = list(lp.LpVariable.dicts('nuB', oidx, cat='Binary').values())

        v = lp.LpVariable('v', cat='Continuous')
        mu = lp.LpVariable('mu', cat='Continuous')

        # Inherent constraints of this problem
        for sols in self.solutionsList:
            expr = lp.lpDot(self.__normw(sols.w), uR)
            cons = self.__normf(sols.objs) @ self.__normw(sols.w)
            prob += expr >= cons * (1 - eps)

        for i in oidx:
            expr = uR[i] - lp.lpSum([
                kp[conN] * self.__normf(sols.objs)[i]
                for conN, sols in enumerate(self.solutionsList)
            ]) - nu[i] + mu
            prob += expr == 0

        bigC = max(self.__normf(self.__globalU))

        for conN, sols in enumerate(self.solutionsList):
            # d, dvec = self.__calcD(sols)
            expr = v - lp.lpDot(w, self.__normf(sols.objs))
            prob += -expr >= 0  # keep
            prob += -expr <= kpB[conN] * bigC  # keep
            prob += kp[conN] >= 0  # keep
            prob += kp[conN] <= (1 - kpB[conN])  # keep

        for i in oidx:
            prob += uR[i] >= self.__normf(self.__globalL)[i]

        for i in oidx:
            prob += w[i] >= 0  # keep
            prob += w[i] <= nuB[i]  # keep
            prob += nu[i] >= 0  # keep
            prob += nu[i] <= (1 - nuB[i]) * 2 * bigC  # keep

        prob += lp.lpSum([w[i] for i in oidx]) == 1
        prob += lp.lpSum([kp[i] for i in range(Nsols)]) == 1

        prob += mu

        # valid inequalities
        prob += mu <= v

        try:
            rnd = np.array(
                sorted([0] + [np.random.rand()
                              for i in range(self.M - 1)] + [1]))
            w_ini = np.array([rnd[i + 1] - rnd[i] for i in range(self.__M)])
            w_ini = w_ini / w_ini.sum()
            for wi, wii in zip(w, w_ini):
                wi.start = wii
            grbs = lp.GUROBI(epgap=self.__mip_gap,
                             SolutionLimit=1,
                             msg=False,
                             OutputFlag=False,
                             Threads=1)
            prob.solve(grbs)

            if self.__goal != float('inf'):
                grbs = lp.GUROBI(timeLimit=self.__time_limit,
                                 epgap=self.__mip_gap,
                                 SolutionLimit=MAXINT,
                                 msg=False,
                                 BestObjStop=self.__goal,
                                 OutputFlag=False,
                                 Threads=1)
            else:
                grbs = lp.GUROBI(timeLimit=self.__time_limit,
                                 epgap=self.__mip_gap,
                                 SolutionLimit=MAXINT,
                                 msg=False,
                                 OutputFlag=False,
                                 Threads=1)
            prob.solve(grbs)
        except Exception:
            # Fall back to CBC if Gurobi is unavailable or fails.
            cbcs = lp.COIN_CMD(maxSeconds=self.__time_limit,
                               fracGap=self.__mip_gap,
                               threads=1)
            prob.solve(cbcs, use_mps=False)

        feasible = False if prob.status in [-1, -2] else True

        if feasible:
            w_ = np.array(
                [lp.value(w[i]) if lp.value(w[i]) >= 0 else 0 for i in oidx])
            w_ = w_ / w_.sum()
            if self.__norm:
                w_ = w_ / (self.__globalU - self.__globalL)
            fobj = lp.value(prob.objective)
            self.__w = np.array(w_)
            self.__importance = fobj
        else:
            raise RuntimeError('Non-feasible solution')
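
The try/except above is a solver-fallback pattern: attempt Gurobi (first with a solution limit to seed a feasible start, then with the real limits) and fall back to CBC when Gurobi is unavailable. A minimal sketch of the same pattern, using the option names PuLP's older GUROBI/COIN_CMD wrappers accept, as in the code above (the function name is hypothetical):

import pulp as lp

def solve_with_fallback(prob, time_limit=60, mip_gap=0.01):
    # Prefer Gurobi when its Python bindings are importable.
    try:
        prob.solve(lp.GUROBI(timeLimit=time_limit, epgap=mip_gap, msg=False))
    except Exception:
        # CBC ships with PuLP, so this branch always has a solver to run.
        prob.solve(lp.COIN_CMD(maxSeconds=time_limit, fracGap=mip_gap, threads=1))
    return lp.LpStatus[prob.status]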
예제 #49
0
Csums = pulp.LpVariable.dicts("line_prod", (prod_line, orderList), 0, None,
                              pulp.LpInteger)
r = pulp.LpVariable.dicts("release", (prod_line, orderList), 0)
compD = pulp.LpVariable.dicts("compDate", (prod_line, orderList), 0)
#CLines=pulp.LpVariable("CLines",[j for j in line_date] ,0) #total amount of all lines by day
Cmax = pulp.LpVariable.dicts("maxDate", [i for i in orderList], 0)
#Create the 'prob' variable to contain the problem data
prob = pulp.LpProblem("The APS Problem", pulp.LpMaximize)
#prob=pulp.LpProblem("The APS Problem",pulp.LpMinimize)

#objective function: consider order priority and lead time
#eps=1e-2
#lambda compD[l][o] if compD[l][o]<=len(plan_dates) else len(plan_dates)
#
prob+=pulp.lpSum([orderPool['priority'][o]*orderPool['order_type'][o]*\
                  adt.model_total_volume(order_spd[(order_spd['order_id']==o)&(order_spd['line_no']==l)][['day_process','num_by_day']],adt.prod_days(compD[l][o],len(plan_dates),r[l][o]),len(plan_dates))\
                  for o in orderList for l in prod_line])

#prob+=Cmax+eps*lpSum([r[j] for j in order_line])-eps*[compD[j] for j in order_line]
#The constraints
#1. every order has to be completed before due date
#2. relationships between release date and due date
#3. release date>=max(0,epst-plan_dates[0])
#4. fixed sum across days and lines equals the total order quantity
for o in orderList:
    for l in prod_line:
        prob += compD[l][o] <= Cmax[o]
        #prob+=compD[l][o] >=r[l][o]+Csums[l][o]*(process_days[o][l]/orderPool['order_num'][o])

        prob += compD[l][o] >= r[l][o] + adt.process_csum(
            Csums[l][o], order_spd[
예제 #50
0
def blocked_reactions_analysis(
        database,
        pulp_solver,
        specific_bounds,
        custom_flux_constraints,
        excluded_reactions=None,
        target_reactions_list=None,
        logger=None):
    """
    Perform flux variability analysis on the database,
    based on the overall reaction equation of optstoic.
    If a reaction cannot carry flux (i.e., -eps <= v(j) <= eps, where eps = 1e-8),
    the reaction is considered blocked.
    The blocked reactions are then eliminated from the database S matrix.
    Next, the internal loops (excluding cofactors) are identified.
    Then, optStoic analysis can be performed for pathway prospecting.

    max/min v(j)

    subject to:
            sum(j, S(i,j) * v(j)) = 0, for all i
            custom_flux_constraints

        Note: The glycolysis study was done using the GAMS version of this code.
        This is written in an attempt to port find_blocked_reactions.gms from GAMS to Python,
        as part of an effort to generalize optstoic analysis.

    Args:
        database (:obj:`BaseReactionDatabase`): The default reaction database
            without blocked reactions/loops.
        pulp_solver (TYPE): The solver for PuLP.
        specific_bounds (dict): LB and UB for exchange reactions which define the
            overall design equations. E.g. {'Ex_glc': {'LB': -1, 'UB':-1}}
        custom_flux_constraints (TYPE): The custom constraints that need to be
            added to the model formulation.
        excluded_reactions (None, optional): The list of reactions that are manually
            selected to be excluded from optstoic solution.
        target_reactions_list (None, optional): If provided, the blocked reaction analysis is performed
            only on the subset of reactions provided. If None, the blocked reaction analysis
            will be performed on all reactions in the database. The excluded_reactions set
            can be subtracted (e.g., set(database.reactions) - excluded_reactions), since
            those are known blocked reactions.
        logger (:obj:`logging.logger`, optional): The logging instance

    Returns:
        TYPE: Description

    Raises:
        ValueError: Description

    Deleted Parameters:
        user_defined_export_rxns_Sji (dict): The list of export reactions that
            need to be added to the model for metabolite exchange (i.e., any metabolite
            that participate in the design equation)
    """
    if logger is None:
        logger = create_logger(
            name="optstoicpy.script.database_preprocessing.blocked_reactions_analysis")

    logger.warning(
        "This process may take a long time to run. It is recommended to be run in a batch script.")

    M = 1000
    EPS = 1e-8

    # Initialize variables
    v = pulp.LpVariable.dicts("v", database.reactions,
                              lowBound=-M, upBound=M, cat='Continuous')

    for j in database.reactions:
        if database.rxntype[j] == 0:
            # Forward irreversible
            v[j].lowBound = 0
            v[j].upBound = M

        elif database.rxntype[j] == 1:
            # Reversible
            v[j].lowBound = -M
            v[j].upBound = M

        elif database.rxntype[j] == 2:
            # Reverse irreversible
            v[j].lowBound = -M
            v[j].upBound = 0

        elif database.rxntype[j] == 4:
            v[j].lowBound = 0
            v[j].upBound = 0

        else:
            raise ValueError("Reaction type for reaction %s is unknown." % j)

    if excluded_reactions is not None:
        for j in excluded_reactions:
            v[j].lowBound = 0
            v[j].upBound = 0

    # Fix stoichiometry of source/sink metabolites
    for j, bounds in specific_bounds.items():
        v[j].lowBound = bounds['LB']
        v[j].upBound = bounds['UB']

    FVA_res = {}
    blocked_reactions = []
    lp_prob = None

    if target_reactions_list is None:
        target_reactions_list = database.reactions
    num_rxn = len(target_reactions_list)

    for ind, j1 in enumerate(target_reactions_list):
        logger.debug("%s/%s" % (ind, num_rxn))
        FVA_res[j1] = {}

        for obj in ['min', 'max']:

            # Variables (make a copy)
            vt = copy.deepcopy(v)
            del lp_prob

            # Objective function
            if obj == 'min':
                lp_prob = pulp.LpProblem("FVA%s" % obj, pulp.LpMinimize)
                lp_prob += vt[j1], "FVA_min"
            elif obj == 'max':
                lp_prob = pulp.LpProblem("FVA%s" % obj, pulp.LpMaximize)
                lp_prob += vt[j1], "FVA_max"

            # Constraints
            # Mass_balance
            for i in database.metabolites:
                # Skip metabolites that are not involved in any reaction
                if i not in database.S:
                    continue
                label = "mass_balance_%s" % i
                dot_S_v = pulp.lpSum([database.S[i][j] * vt[j]
                                      for j in list(database.S[i].keys())])
                condition = dot_S_v == 0
                lp_prob += condition, label

            if custom_flux_constraints is not None:
                logger.info("Adding custom constraints...")

                for group in custom_flux_constraints:
                    lp_prob += pulp.lpSum(vt[rxn] for rxn in group['reactions']
                                          ) <= group['UB'], "%s_UB" % group['constraint_name']
                    lp_prob += pulp.lpSum(vt[rxn] for rxn in group['reactions']
                                          ) >= group['LB'], "%s_LB" % group['constraint_name']

            lp_prob.solve(solver=pulp_solver)

            FVA_res[j1][obj] = pulp.value(lp_prob.objective)

        if (FVA_res[j1]['max'] < EPS) and (FVA_res[j1]['min'] > -EPS):
            blocked_reactions.append(j1)

        # Checkpoint: rewrite the accumulated FVA results after every reaction.
        json.dump(FVA_res,
                  open("temp_FVA_result.json", 'w+'),
                  sort_keys=True,
                  indent=4)

    return blocked_reactions, FVA_res
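
A hedged usage sketch: the database handle `db` is an assumption (any configured BaseReactionDatabase instance), the solver can be any PuLP solver object, and the bounds follow the docstring example:

pulp_solver = pulp.solvers.GLPK_CMD(msg=0)  # any PuLP solver object works here
blocked, fva_res = blocked_reactions_analysis(
    database=db,                            # assumed to exist
    pulp_solver=pulp_solver,
    specific_bounds={'Ex_glc': {'LB': -1, 'UB': -1}},
    custom_flux_constraints=None,
    excluded_reactions=None,
    target_reactions_list=None)
print('%d blocked reactions found' % len(blocked))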
예제 #51
0
def build_local_problem_lp(pb, name):
    lp = pulp.LpProblem(name + ".lp", pulp.LpMinimize)
    lp.setSolver()
    prod_vars = {}
    in_use_vars = {}
    defaillance = {}  # load-shedding variables (unused in this snippet)
    cout_proportionnel_defaillance = 3000  # cost of load shedding, euros/MWh
    # demand = demand - self_consumption
    # self_consumption <= renewables (Enr)
    # Enr = Enr - self_consumption
    # inject =

    for t in pb.time_steps:
        prod_vars[t] = {}
        in_use_vars[t] = {}

        var_name = "battery_stock" + str(t)
        prod_vars[t]["battery_stock"] = pulp.LpVariable(
            var_name, 0.0, pb.battery.energy_max)

        var_name = "battery_store" + str(t)
        prod_vars[t]["battery_store"] = pulp.LpVariable(
            var_name, -pb.battery.power_max_store, 0.0)

        var_name = "battery_prod" + str(t)
        prod_vars[t]["battery_prod"] = pulp.LpVariable(
            var_name, 0.0, pb.battery.power_max_prod)

        cnt_name = "cnt_storage_evolution_" + str(t)
        if t == pb.time_steps[0]:
            #initial stock=0
            lp += prod_vars[t][
                "battery_stock"] == pb.time_step_duration / 60.0 * (
                    -pb.battery.efficiency * prod_vars[t]["battery_store"] -
                    prod_vars[t]["battery_prod"]), cnt_name
        else:
            lp += prod_vars[t]["battery_stock"] == prod_vars[
                t - 1]["battery_stock"] + pb.time_step_duration / 60.0 * (
                    -pb.battery.efficiency * prod_vars[t]["battery_store"] -
                    prod_vars[t]["battery_prod"]), cnt_name

        var_name = "auto_conso_" + str(t)
        prod_vars[t]["auto_conso"] = pulp.LpVariable(var_name, 0.0,
                                                     pb.local_demand[t])
        var_name = "payed_" + str(t)
        prod_vars[t]["payed_conso"] = pulp.LpVariable(var_name, 0.0,
                                                      pb.local_demand[t])
        var_name = "sold_local_production" + str(t)
        prod_vars[t]["sold_local_production"] = pulp.LpVariable(
            var_name, 0.0, pb.local_solar_power[t] + pb.battery.power_max_prod)

        lp += prod_vars[t]["auto_conso"] + prod_vars[t][
            "payed_conso"] == pb.local_demand[t], "cnt_local_conso_" + str(t)

        lp += prod_vars[t][
            "sold_local_production"] == pb.local_solar_power[t] - prod_vars[t][
                "auto_conso"] + prod_vars[t]["battery_prod"] + prod_vars[t][
                    "battery_store"], "cnt_local_solar_" + str(t)


    lp.setObjective( pb.electricity_tariff*pulp.lpSum([prod_vars[t]["payed_conso"]  for t in pb.time_steps])* pb.time_step_duration/60.0\
                   -pulp.lpSum([pb.solar_power_selling_price_chronicle[t]*prod_vars[t]["sold_local_production"] for t in pb.time_steps]) * pb.time_step_duration/60.0)

    model = Model(lp, prod_vars)
    return model
예제 #52
0
def run(total, price):
    y = [None] * len(price)
    names = [p[1] for p in price]

    problem = pulp.LpProblem("Problem-1", pulp.LpMinimize)

    for index in range(len(price)):
        y[index] = pulp.LpVariable("y" + str(index), 0, 2, pulp.LpInteger)

    # problem += pulp.lpSum([y[i] for i in range(len(price))]), "The number of items"

    prices = [y[i] * price[i][0] for i in range(len(price))]
    # prices = [y[i] * price[i] for i in range(len(price))]

    # The first bare expression added becomes the objective: the unused remainder.
    problem += total - pulp.lpSum(prices), "Use as much"

    # Spend exactly the total ...
    problem += total == pulp.lpSum(prices), "Use exact"

    # ... and never exceed it (implied by the equality above).
    problem += pulp.lpSum(prices) <= total, "Do not over"

    for index in range(len(price)):
        if len(price[index]) >= 4:
            # Set minimum
            problem += y[index] >= price[index][3]
        if len(price[index]) >= 3:
            # Set maximum
            problem += y[index] <= price[index][2]

    # Print the full problem formulation
    print("Problem formulation")
    print("--------")
    print(problem)
    print("--------")
    print(price)

    # Solve
    result_status = problem.solve()

    # If a solution was found, print the objective value and the solution
    if pulp.LpStatus[result_status] != "Optimal":
        print("No solution")
        return {"ok": False}
    else:
        obj = pulp.value(problem.objective)
        if obj != 0:
            print("解無し 残額: %d" % (obj))
            return {"ok": False}
        else:
            print("")
            print("計算結果")
            print("********")
            print("最適性 = {}".format(pulp.LpStatus[result_status]))
            print("目的関数値 = {}".format(obj))
            print("y = {}".format([int(pulp.value(v)) for v in y]))
            print("********")

            print("      ", price)
            used = 0
            for i, v in enumerate(zip(price, [int(pulp.value(v)) for v in y])):
                p, n = v
                if n > 0:
                    print("{}\t{}\t{}\t{}".format(i, p[0], n, p[1]))
                used += p[0] * n
            print("Total: ", used)
            count = {names[i]: int(pulp.value(y[i])) for i in range(len(y))}
            return {"ok": True, "count": count}
예제 #53
0
            missingMatching = False
print(len(matched))
# Create the model
model = LpProblem(name="small-problem", sense=LpMinimize)

# Initialize the decision variables
variables = [
    LpVariable(name=f"{i}", lowBound=0, upBound=1)
    for i in sorted(graph.keys())
]
# Vertex-cover constraints: every edge needs at least one endpoint selected.
# (Node ids are assumed to be 1-based, matching the `- 1` offsets below.)
for x in graph:
    for edge in graph[x]:
        model += variables[x - 1] + variables[edge - 1] >= 1

# Add the objective function to the model
model += lpSum(variables)

# Solve the problem
status = model.solve()
print(f"status: {model.status}, {LpStatus[model.status]}")
print(f"objective: {model.objective.value()}")

# Round the relaxed values to 0/1 by shadowing each variable's value() method.
for var in model.variables():
    if var.value() >= 0.5:
        var.value = lambda: 1
    else:
        var.value = lambda: 0
    print(f"{var.name}: {var.value()}")
print(f"objective: {model.objective.value()}")
예제 #54
0
def cg_secp_ilp(
    cg: ComputationConstraintsHyperGraph,
    agents: List[AgentDef],
    already_assigned: Distribution,
    computation_memory: Callable[[ComputationNode], float],
    communication_load: Callable[[ComputationNode, str], float],
) -> Distribution:

    agents = list(agents)
    agents_names = [a.name for a in agents]

    # Only keep computations for which we actually need to find an agent.
    comps_to_host = [
        c for c in cg.node_names() if not already_assigned.has_computation(c)
    ]

    # x_i^k : binary variable indicating if var x_i is hosted on agent a_k.
    xs = _build_cs_binvar(comps_to_host, agents_names)
    # alpha_ijk : binary variable indicating if  x_i and f_j are both on a_k.
    alphas = _build_alphaijk_binvars(cg, agents_names)
    logger.debug(f"alpha_ijk {alphas}")

    # LP problem with objective function (total communication cost).
    pb = LpProblem("distribution", LpMinimize)
    pb += (
        _objective_function(cg, communication_load, alphas, agents_names),
        "Communication costs",
    )

    # Constraints.
    # All variable computations must be hosted:
    for i in comps_to_host:
        pb += (
            lpSum([xs[(i, k)] for k in agents_names]) == 1,
            "var {} is hosted".format(i),
        )
    # Each agent must host at least one computation:
    # We only need this constraint for agents that do not already host a
    # computation:
    empty_agents = [
        a for a in agents_names if not already_assigned.computations_hosted(a)
    ]
    for k in empty_agents:
        pb += (
            lpSum([xs[(i, k)] for i in comps_to_host]) >= 1,
            "atleastone {}".format(k),
        )

    # Memory capacity constraint for agents
    for a in agents:
        # Decrease capacity for already hosted computations
        capacity = a.capacity - sum([
            secp_computation_memory_in_cg(c, cg, computation_memory)
            for c in already_assigned.computations_hosted(a.name)
        ])

        pb += (
            lpSum([
                secp_computation_memory_in_cg(i, cg, computation_memory) *
                xs[(i, a.name)] for i in comps_to_host
            ]) <= capacity,
            "memory {}".format(a.name),
        )

    # Linearization constraints for alpha_ijk.
    for (i, j), k in alphas:

        if i in comps_to_host and j in comps_to_host:
            pb += alphas[((i, j), k)] <= xs[(i, k)], "lin1 {}{}{}".format(
                i, j, k)
            pb += alphas[((i, j), k)] <= xs[(j, k)], "lin2 {}{}{}".format(
                i, j, k)
            pb += (
                alphas[((i, j), k)] >= xs[(i, k)] + xs[(j, k)] - 1,
                "lin3 {}{}{}".format(i, j, k),
            )

        elif i in comps_to_host and j not in comps_to_host:
            # Var is free, factor is already hosted
            if already_assigned.agent_for(j) == k:
                pb += alphas[((i, j), k)] == xs[(i, k)]
            else:
                pb += alphas[((i, j), k)] == 0

        elif i not in comps_to_host and j in comps_to_host:
            # if i is not in comps_to_host, it means that it is a
            # computation that is already hosted (from hints)
            if already_assigned.agent_for(i) == k:
                pb += alphas[((i, j), k)] == xs[(j, k)]
            else:
                pb += alphas[((i, j), k)] == 0

        else:
            # i and j are both already hosted
            if (already_assigned.agent_for(i) == k
                    and already_assigned.agent_for(j) == k):
                pb += alphas[((i, j), k)] == 1
            else:
                pb += alphas[((i, j), k)] == 0

    # Now solve our LP
    # status = pb.solve(GLPK_CMD())
    # status = pb.solve(GLPK_CMD(mip=1))
    # status = pb.solve(GLPK_CMD(mip=0, keepFiles=1,
    #                                options=['--simplex', '--interior']))
    status = pb.solve(GLPK_CMD(keepFiles=0, msg=False, options=["--pcost"]))

    if status != LpStatusOptimal:
        raise ImpossibleDistributionException("No possible optimal"
                                              " distribution ")
    else:
        logger.debug("GLPK cost : %s", pulp.value(pb.objective))

        comp_dist = already_assigned
        for k in agents_names:

            agt_vars = [
                i for i, ka in xs if ka == k and pulp.value(xs[(i, ka)]) == 1
            ]
            comp_dist.host_on_agent(k, agt_vars)

        return comp_dist
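
The `lin1`-`lin3` constraints above are the standard linearization of a product of two binaries, forcing `alpha = x_i AND x_j`. The gadget in isolation (a minimal sketch with toy variable names):

import pulp

prob = pulp.LpProblem('and_gadget', pulp.LpMinimize)
x = pulp.LpVariable('x', cat='Binary')
y = pulp.LpVariable('y', cat='Binary')
a = pulp.LpVariable('a', cat='Binary')  # plays the role of alpha_ijk

prob += a <= x          # a can be 1 only if x is 1 ...
prob += a <= y          # ... and only if y is 1,
prob += a >= x + y - 1  # and a must be 1 when both are 1.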
예제 #55
0
        LP += N_electrolyzer_2 * E_electrolyzer_min <= E_2[h]
        LP += N_electrolyzer_2 * E_electrolyzer_max >= E_2[h]
        LP += N_electrolyzer_3 * E_electrolyzer_min <= E_3[h]
        LP += N_electrolyzer_3 * E_electrolyzer_max >= E_3[h]

        # Reactor constraints
        LP += RNG[h] <= RNG_max
        if h != '0':
            LP += -RNG_max * tau <= RNG[h] - RNG[str(i - 1)]
            LP += RNG_max * tau >= RNG[h] - RNG[str(i - 1)]

        # HENG-specific constraints
        LP += 0.95 * H2_2[h] <= 0.05 * (NG[h] + RNG[h])

    # Integer constraints
    LP += pulp.lpSum(n * alpha_1[str(n)] for n in range(1, N_max)) == N_electrolyzer_1
    LP += pulp.lpSum(alpha_1) <= 1
    LP += pulp.lpSum(n * alpha_2[str(n)] for n in range(1, N_max)) == N_electrolyzer_2
    LP += pulp.lpSum(alpha_2) <= 1
    LP += pulp.lpSum(n * alpha_3[str(n)] for n in range(1, N_max + 1)) == N_electrolyzer_3
    LP += pulp.lpSum(alpha_3) <= 1

    # Emission constraints
    LP += pulp.lpSum(EMF_comb * RNG[h] + EMF[h] * E_1[h] + EMF_bio * CO2[h] + \
                     EMF_electrolyzer * H2_1[h] + EMF_reactor * RNG[h] \
                     for h in [str(x) for x in input_df.index]) == em_rng
    LP += pulp.lpSum(EMF_NG * NG[h] + EMF[h] * E_2[h] + EMF_electrolyzer * H2_2[h] \
                     for h in [str(x) for x in input_df.index]) == em_heng
    LP += num_vehicle * FCV_penetration * EMF_vehicle == em_offset_fcv
    LP += pulp.lpSum(EMF_NG * NG_demand[h] for h in input_df.index) == em_ng
예제 #56
0
File: ilp_solver.py Project: nesl/Heliot
def _solver(Gt, Gd, target_latency=None):
    """
    Args:
        target_latency (int): latency constraint for this task graph
        source_list (list): source nodes in the task graph
        dst_list (list): destination nodes in the task graph
        Gt (networkx.DiGraph): task graph in a multi-source, single
        destination, no-loop directed graph, where src_k are data sources,
        dst is the actuator, and other nodes in between are tasks

           src1 -----> t11 -----> t12 ... ----->  dst
           src2 -----> t21 -----> t22 ... ----->
                 ...   ...  ...  ...
                -----> tk1 -----> tk2 ... ----->

            Gt.node[t] (dict): node, stores information of each task
            Gt[t1][t2] (dict): edge, stores relationship between tasks

            E(t1, t2) (Unit): input/output relationship between t1, t2
                If t1 will not output any data to t2, set the value to 0
                e.g. Gt[t1][t2][GtInfo.TRAFFIC] = Unit.byte(20)
                    Gt[t1][t2][GtInfo.TRAFFIC] = 0
            _It(t2) (Unit): total input data size to the task. Obtained
                from sum E(ti, t2) for all ti with an edge to t2. The value
                will be stored at Gt.node[t][GtInfo.RESRC_RQMT]
            _Ot(t1) (Unit): total output data size from the task. Obtained
                from sum E(t1, ti) for all ti with an edge from t1. The
                value will be stored at Gt.node[t][GtInfo.RESRC_RQMT]
            Lt(t,d) (Unit): computation latency of task t runs on device d.
                Devices can be categorized according to number of CPUs,
                GPUs, RAM size, and disk space.
                e.g. Gt.node[t][GtInfo.LATENCY_INFO] = {
                        Device.T2_MICRO: Unit.ms(100),
                        Device.P3_2XLARGE: Unit.ms(5)}
                    device_type = Device.type(Gd.node[d][GdInfo.HARDWARE])
                    Gt.node[t][GtInfo.LATENCY_INFO][device_type] = 100 (ms)
            Rt(t) (dict): minimum RESRC requirement for task t
            Rt(t,r,d): minimum requirement of RESRC r for task t of a
                specific build flavor for that device
                e.g. build_type = Flavor.type(Device.P3_2XLARGE)
                    assert(build_type == Flavor.GPU)
                    Gt.node[t][GtInfo.RESRC_RQMT][build_type] = {
                        Hardware.RAM: Unit.gb(2),
                        Hardware.HD: Unit.mb(512),
                        Hardware.CPU: Unit.percentage(10),
                        Hardware.GPU: Unit.percentage(60),
                        Hardware.CAMERA: 1,
                        Hardware.NIC_INGRESS: Unit.mb(2),  # _It(t)
                        Hardware.NIC_EGRESS: Unit.byte(20),  # _Ot(t)
                    }

        Gd (networkx.DiGraph): a directed graph describes network topology,
            where each node represent a device

            Gd[d] (dict): information of each device, including:

            Ld(d1, d2) (Unit): transmission time between two devices d1, d2
                If d2 is not reachable from d1, set the value to MAXINT
                e.g. Gd[d1][d2][GdInfo.LATENCY] = 20 (ms)
                    Gd[d1][d2][GdInfo.LATENCY] = Const.MAXINT
            _Hd(d) (dict): hardware specification of device d.
                Use this internal information to determine device_type
                Dd(t) and calculate Rd(d).
                e.g. Gd.node[d][GdInfo.HARDWARE] = {
                    Hardware.RAM: Unit.gb(16),
                    Hardware.HD: Unit.tb(1),
                    Hardware.CPU: 4,
                    Hardware.GPU: 1,
                    Hardware.GPS: 1,
                    Hardware.CAMERA: 1
                    Hardware.NIC_INGRESS: Unit.gbps(10),
                    Hardware.NIC_EGRESS: Unit.gbps(10),
                }
            _Dd(d) (enum): device type of device d, determined by hardware
                specification of the device. Used by Gt.node[t] for
                accessing information of a certain device type
                e.g. device_type = Device.type(Gd.node[d][GdInfo.HARDWARE])
                    assert(device_type == Device.T2_MICRO)
            Rd(d) (dict): available RESRCs on device d.
            Rd(d, r) (Unit): availablity of RESRC r on device d.
                e.g. Gd.node[d][GdInfo.RESRC] = {
                    Hardware.RAM: Unit.gb(12),
                    Hardware.HD: Unit.gb(500),
                    Hardware.CPU: Unit.percentage(80),
                    Hardware.GPU: Unit.percentage(100),
                    Hardware.BW_INGRESS: Unit.mb(100),
                    Hardware.BW_EGRESS: Unit.mb(60),
                    Hardware.GPS: 1,
                    Hardware.PROXIMITY: 1,
                    Hardware.ACCELEROMETER: 1,
                    Hardware.GYROSCOPE: 1,
                    Hardware.CAMERA: 1,
                }

    decision variable:
        X(t,d) = 1 if assign task t to device d else 0
    objective: minimize longest path's overall latency in the task graph,
        i.e. total execution times + transmission latencies along the path

                         len(p)
        minimize   max   {  sum ( X(ti,di) * Lt(ti, di) )
        X(t,d)   p in Gt    i=1
                       len(p)-1
                    +   sum ( X(ti,di) * X(ti+1,di+1) * Ld(di, di+1) ) }
                        i=1

        this can be simplified by an auxiliary variable and rewrote as:

            minimize Y , where Y = max {....} above

        with additional constraints:

        for p in Gt:
                      len(p)
                Y >=   max   {  sum ( X(ti,di) * Lt(ti, di) )
                     p in Gt    i=1
                      len(p)-1
                   +   sum ( X(ti,di) * X(ti+1,di+1) * Ld(di, di+1) ) }
                       i=1

    constraint 1: neighbors in the task graph must also be accessible from
        each other in the network graph
        for (ti, tj) in Gt.edges():
            for all combinations (di, dj) in Gd:
                X(ti,di) * X(tj,dj) * Ld(di, dj) < LATENCY_MAX

    constraint 2: each device must be able to support what its tasks need
        for d in Gd:
            for r in Rd:
                           len(Gt)
                Rd(d,r) - { sum ( X(ti,d) * Rt(ti,r,d) ) } >= 0
                            i=1

    constraint 3: task -> device is a one-to-one mapping, and every task must
        be mapped to exactly one device

    since linear programming cannot have variable multiplication,
    use a helper variable XX to replace X(ti,di) * X(tj,dj)

        X(ti,di) * X(tj,dj) ---replace---> XX(X(ti,di),X(tj,dj))

    with additional constraints
        XX(X(ti,di),X(tj,dj)) + 1 >= X(ti,di) + X(tj,dj)
        XX(X(ti,di),X(tj,dj)) * 2 <= X(ti,di) + X(tj,dj)

    """
    # define invalid_latency as a constraint to filter out meaningless
    # solutions
    if target_latency:
        invalid_latency = target_latency * 2
    else:
        invalid_latency = Const.INT_MAX
    log.info(
        ('set invalid_latency={latency}. skip neighbors cannot be reached '
         'within {latency} ms.').format(latency=invalid_latency))

    # Generate all possible mappings of device_i <-> task_i
    known_mapping = {}
    for node in Gt.nodes():
        mapped_device = Gt.node[node].get(GtInfo.DEVICE, None)
        if mapped_device is not None:
            known_mapping[node] = mapped_device
    log.info('known_mapping: {}'.format(known_mapping))
    mapped_tasks = list(known_mapping)
    mapped_devices = listvalues(known_mapping)
    tasks = [t for t in Gt.nodes() if t not in mapped_tasks]
    devices = [d for d in Gd.nodes() if d not in mapped_devices]
    log.info('find possible mappings for {} tasks in {} devices'.format(
        len(tasks), len(devices)))

    # create LP problem
    prob = pulp.LpProblem("placethings", pulp.LpMinimize)
    # auxiliary variable: it represent the longest path's overall latency
    # in the task graph
    Y = pulp.LpVariable('LongestPathLength',
                        lowBound=0,
                        upBound=Const.INT_MAX,
                        cat='Integer')
    # objective: minimize longest path's overall latency in the task graph
    # later, add additional constraints for the auxiliary variable
    prob += Y

    # decision variable: X(t,d) = 1 if assign task t to device d else 0
    X = defaultdict(dict)
    all_unknown_X = []
    for t in tasks:
        for d in devices:
            X[t][d] = None
    for d in mapped_devices:
        for t in Gt.nodes():
            X[t][d] = 0
    for t in mapped_tasks:
        for d in Gd.nodes():
            X[t][d] = 0
        d_known = Gt.node[t][GtInfo.DEVICE]
        X[t][d_known] = 1  # mapping added by the user
    for t in tasks:
        for d in devices:
            if X[t][d] is None:
                X[t][d] = pulp.LpVariable('X_{}_{}'.format(t, d),
                                          lowBound=0,
                                          upBound=1,
                                          cat='Integer')
                all_unknown_X.append(X[t][d])
        # one-to-one mapping: each task is assigned to exactly one device
        prob += pulp.lpSum(listvalues(X[t])) == 1
    # the number of X variables set to 1 must equal the number of tasks
    log.info('there are {} unknowns'.format(len(all_unknown_X)))

    assert len(all_unknown_X) == len(tasks) * len(devices)
    # tasks are the tasks not yet assigned.
    # devices are the devices not yet mapped

    prob += pulp.lpSum(all_unknown_X) == len(tasks)
    # auxiliary variable: use XX to replace X[ti][di] * X[tj][dj]
    XX = defaultdict(dict)
    all_XX = []
    for ti in Gt.nodes():
        for di in Gd.nodes():
            for tj in Gt.nodes():
                for dj in Gd.nodes():
                    if (ti, tj) not in Gt.edges():
                        XX[(ti, di)][(tj, dj)] = 0
                    elif (di, dj) not in Gd.edges() or (
                            Gd[di][dj][GdInfo.LATENCY] > invalid_latency):
                        # constraint 1: neighbors in the task graph must also
                        # be accessible from each other in the network graph
                        XX[(ti, di)][(tj, dj)] = 0
                    else:
                        XX[(ti, di)][(tj,
                                      dj)] = pulp.LpVariable('XX_{}_{}'.format(
                                          (ti, di), (tj, dj)),
                                                             lowBound=0,
                                                             upBound=1,
                                                             cat='Integer')
                        all_XX.append(XX[(ti, di)][(tj, dj)])
                        # add linearization constraints
                        prob += (XX[(ti, di)][(tj, dj)] + 1 >=
                                 X[ti][di] + X[tj][dj])
                        prob += (XX[(ti, di)][(tj, dj)] * 2 <=
                                 X[ti][di] + X[tj][dj])
    log.info('there are {} combinations for links'.format(len(all_XX)))
    prob += pulp.lpSum(all_XX) == len(Gt.edges())

    # Generate all simple paths in the graph G from source to target.
    src_list, dst_list, all_paths = _find_all_simple_path(Gt)
    log.info('find all path from {} to {}'.format(src_list, dst_list))
    # Generate all possible mappings
    all_mappings = list(pulp.permutation(devices, len(tasks)))
    log.info('{} possible mappings for {} devices and {} tasks'.format(
        len(all_mappings), len(devices), len(tasks)))
    task_to_idx = dict(zip(tasks, range(len(tasks))))
    # use constraints to model Y, the longest path for each mapping
    for device_mapping in all_mappings:
        for path in all_paths:
            assert path[0] in src_list
            assert path[len(path) - 1] in dst_list
            path_vars = []
            # first node: src
            ti = path[0]
            di = Gt.node[ti][GtInfo.DEVICE]
            # device_mapping start from path[1] to path[N-1]
            for j in range(1, len(path) - 1):
                tj = path[j]
                dj = device_mapping[task_to_idx[tj]]
                assert Gt.node[tj][GtInfo.DEVICE] is None
                assert dj in Gd.nodes()
                # get transmission latency from di -> dj
                Ld_di_dj = Gd[di][dj][GdInfo.LATENCY]
                # path_vars.append((X[ti][di] * X[tj][dj]) * Ld_di_dj)
                path_vars.append(XX[(ti, di)][(tj, dj)] * Ld_di_dj)
                log.debug('Ld_di_dj (move from {} to {}) = {}'.format(
                    di, dj, Ld_di_dj))
                # get computation latency for task tj at dj
                dj_type = Gd.node[dj][GdInfo.DEVICE_TYPE]
                # get latency of the default build flavor
                Lt_tj_dj = listvalues(
                    Gt.node[tj][GtInfo.LATENCY_INFO][dj_type])[0]
                path_vars.append(X[tj][dj] * Lt_tj_dj)
                log.debug('Lt_tj_dj (compute {} at {}) = {}'.format(
                    tj, dj, Lt_tj_dj))
                ti = tj
                di = dj
            # last node: dst
            tj = path[len(path) - 1]
            dj = Gt.node[tj][GtInfo.DEVICE]
            Ld_di_dj = Gd[di][dj][GdInfo.LATENCY]
            # path_vars.append(X[ti][di] * X[tj][dj] * Ld_di_dj)
            path_vars.append(XX[(ti, di)][(tj, dj)] * Ld_di_dj)
            log.debug('Ld_di_dj (move from {} to {}) = {}'.format(
                di, dj, Ld_di_dj))
            log.debug('add constraint for path:\n Y >= {}'.format(path_vars))
            # add constraint
            prob += Y >= pulp.lpSum(path_vars)

    # constraint 2: each device must be able to support what its tasks need
    #     for d in Gd:
    #         for r in Rd:
    #                        len(Gt)
    #             Rd(d,r) - { sum ( X(ti,d) * Rt(ti,r,d) ) } >= 0
    #                         i=1
    for di in devices:
        for resrc in Hardware:
            # get available RESRC or set to 0
            Rd_d_r = Gd.node[di][GdInfo.RESRC].get(resrc, 0)
            var_list = []
            for ti in tasks:
                di_type = Gd.node[di][GdInfo.DEVICE_TYPE]
                # get the default flavor
                ti_flavor = list(Gt.node[ti][GtInfo.LATENCY_INFO][di_type])[0]
                Rt_t_r_d = (Gt.node[ti][GtInfo.RESRC_RQMT][ti_flavor].get(
                    resrc, 0))
                if Rt_t_r_d > 0:
                    var_list.append(X[ti][di] * Rt_t_r_d)
            if var_list:
                log.debug('add constraint for {}({}):\n {} <= {}'.format(
                    di, resrc, var_list, Rd_d_r))
                prob += pulp.lpSum(var_list) <= Rd_d_r

    # solve
    status = prob.solve(pulp.solvers.GLPK(msg=1))
    log.info('status={}'.format(pulp.LpStatus[status]))
    result_mapping = {}
    for t in Gt.nodes():
        for d in Gd.nodes():
            if pulp.value(X[t][d]):
                log.info('map: {} <-> {}, X_t_d={}'.format(
                    t, d, pulp.value(X[t][d])))
                result_mapping[t] = d
    return status, result_mapping
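
The auxiliary variable `Y` above applies the usual min-max rewrite from the docstring: minimize `Y` subject to `Y >=` every path expression. Stripped of the mapping machinery, the structure looks like this sketch with made-up path terms:

import pulp

toy_paths = [[3, 5], [4, 4, 1], [7]]  # made-up per-hop latency terms
prob = pulp.LpProblem('min_longest_path', pulp.LpMinimize)
Y = pulp.LpVariable('Y', lowBound=0)
prob += Y                           # objective: the longest path length
for terms in toy_paths:
    prob += Y >= pulp.lpSum(terms)  # Y dominates every path
prob.solve(pulp.PULP_CBC_CMD(msg=False))
print(pulp.value(Y))                # 9.0: the maximum of the path sums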
예제 #57
0
    def outer_recur_solve(self, temp_leximin, leximin_counts,
                          return_assignment=False):
        next_c_star = pulp.LpVariable(
            'next_leximin', 0, temp_leximin
        )

        x = pulp.LpVariable.dicts(
            'assignment',
            [(agent_id, intv_id)
            for agent_id in range(self.n_agents)
            for intv_id in range(self.n_intvs)],
            cat='Binary'
        )

        prob = pulp.LpProblem()
        prob += next_c_star

        # Assignment constraint
        for agent_id in range(self.n_agents):
            prob += pulp.lpSum(
                x[(agent_id, intv_id)]
                for intv_id in range(self.n_intvs)
            ) == 1

        # Capacity constraints
        for intv_id in range(self.n_intvs):
            prob += pulp.lpSum(
                x[(agent_id, intv_id)]
                for agent_id in range(self.n_agents)
            ) <= self.capacities[intv_id]

        # Leximin count constraints
        for leximin in leximin_counts:
            prob += pulp.lpSum(
                x[(agent_id, intv_id)]
                for agent_id, intv_id in np.argwhere(self.cost_matrix == leximin)
            ) == leximin_counts[leximin]

        # Constraints for next leximin
        for agent_id in range(self.n_agents):
            for intv_id in range(self.n_intvs):
                temp_cost = self.cost_matrix[agent_id, intv_id]

                if temp_cost not in leximin_counts:
                    prob += x[(agent_id, intv_id)] * temp_cost \
                        <= next_c_star

        status = prob.solve(solver=pulp.solvers.GUROBI_CMD())

        if pulp.LpStatus[status] == 'Optimal':
            if return_assignment:
                assignments = np.zeros((self.n_agents,), dtype=int)

                for agent_id in range(self.n_agents):
                    for intv_id in range(self.n_intvs):
                        if x[(agent_id, intv_id)].varValue == 1:
                            assignments[agent_id] = intv_id

                return next_c_star.varValue, assignments

            return next_c_star.varValue

        return False
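
The "next leximin" constraints above bound the largest cost any not-yet-fixed assignment may take. The same gadget on a toy instance (a sketch; the data is made up):

import pulp

costs = {('a', 0): 4, ('a', 1): 2, ('b', 0): 3, ('b', 1): 5}
prob = pulp.LpProblem('min_worst_cost', pulp.LpMinimize)
x = pulp.LpVariable.dicts('x', costs.keys(), cat='Binary')
worst = pulp.LpVariable('worst', lowBound=0)
prob += worst                                    # minimize the worst selected cost
for agent in ('a', 'b'):
    prob += pulp.lpSum(x[(agent, i)] for i in (0, 1)) == 1
for key, c in costs.items():
    prob += x[key] * c <= worst                  # same form as the constraint above
prob.solve(pulp.PULP_CBC_CMD(msg=False))
print(pulp.value(worst))                         # 3.0 ('a' takes cost 2, 'b' takes 3)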
예제 #58
0
def solve(professors, courses, semesters, slots):
    prob = pulp.LpProblem("Semester Problem", pulp.LpMaximize)

    opt_terms = []
    for k, v in lp_vars.items():
        if k[2] in n1s + n2s:
            opt_terms.append(100 * lp_vars_rev[v.name][2].size * v)
        else:
            opt_terms.append(lp_vars_rev[v.name][2].size * v)
    # Note: opt_terms (with the extra night-slot weighting) is built above but
    # never used; the objective below weighs every slot only by its size.
    opt_fun = pulp.lpSum(lp_vars_rev[v.name][2].size * v
                         for k, v in lp_vars.items())
    prob += opt_fun

    #print(lp_vars.keys())

    # At most one class per slot
    for s in slots:
        for sem in semesters:
            v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                    for p in professors \
                    for c in courses)
            prob += v <= 1  #s.size

    #for sem in semesters:
    #    for s in sem.slots:
    #        v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
    #                for p in professors \
    #                for c in courses)
    #        prob += v <= 1 #s.size
    for s in slots:
        for p in professors:
            v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                    for sem in semesters \
                    for c in courses)
            prob += v <= 1  #s.size

    # prevent clashes between the professors of the joint classes
    for s in slots:
        a = pulp.lpSum(lp_vars[(priscila, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        b = pulp.lpSum(lp_vars[(guilherme, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        c = pulp.lpSum(lp_vars[(priscila_guilherme, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        prob += a + c <= 1  #s.size
        prob += b + c <= 1  #s.size

    # prevent clashes between the professors of the joint classes
    for s in slots:
        a = pulp.lpSum(lp_vars[(padilha, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        b = pulp.lpSum(lp_vars[(emilio, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        c = pulp.lpSum(lp_vars[(emilio_padilha, c, s, sem)] \
                for sem in semesters \
                for c in courses)
        prob += a + c <= 1  #s.size
        prob += b + c <= 1  #s.size

    for s in sex_noite:
        for sem in semesters:
            v = pulp.lpSum(lp_vars[(priscila_guilherme, c, s, sem)] \
                    for c in courses)
            prob += v == 0  #s.size

    for s in sex_noite:
        for sem in semesters:
            v = pulp.lpSum(lp_vars[(guilherme, c, s, sem)] \
                    for c in courses)
            prob += v == 0  #s.size

    for s in sex_noite:
        for sem in semesters:
            v = pulp.lpSum(lp_vars[(raquel, c, s, sem)] \
                    for c in courses)
            prob += v == 0  #s.size

    # Each professor must only give his classes
    for p in professors:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] * s.size \
                for s in slots \
                for c in p.courses \
                for sem in semesters )
        prob += v <= sum(c.num_hours for c in p.courses)

    # Each professor must not give other professor's classes
    for p in professors:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                for s in slots \
                for c in courses if not c in p.courses \
                for sem in semesters )
        prob += v == 0

    # Each course must be complete
    for c in cs:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] * s.size \
                for s in slots \
                for p in ps \
                for sem in semesters )
        prob += v <= c.num_hours

    # Each semester must have all its courses filled
    #for sem in semesters:
    #    v = pulp.lpSum(lp_vars[(p, c, s, sem)] * s.size \
    #            for s in sem.slots \
    #            for p in ps \
    #            for c in sem.courses)
    #    prob += v <= sum(c.num_hours for c in sem.courses)

    # Each semester must only have its courses # tested
    for sem in semesters:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                for s in slots \
                for p in ps \
                for c in cs if not c in sem.courses)
        prob += v == 0

    # Each semester must only have its slots # tested
    for sem in semesters:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                for s in slots if not s in sem.slots \
                for p in ps \
                for c in cs)
        prob += v == 0

    # apply the legally required constraints
    for p in professors:
        #print(p, proibidos)
        for (a, b) in proibidos:
            va = pulp.lpSum(lp_vars[(p, c, a, sem)] \
                    for c in p.courses \
                    for sem in semesters )
            vb = pulp.lpSum(lp_vars[(p, c, b, sem)] \
                    for c in p.courses \
                    for sem in semesters )
            prob += va + vb <= 1

    # apply the legal constraints to the joint classes
    for (a, b) in proibidos:
        va1 = pulp.lpSum(lp_vars[(priscila, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        va2 = pulp.lpSum(lp_vars[(guilherme, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        vb = pulp.lpSum(lp_vars[(priscila_guilherme, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        prob += va1 + vb <= 1
        prob += va2 + vb <= 1
        va1 = pulp.lpSum(lp_vars[(emilio, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        va2 = pulp.lpSum(lp_vars[(padilha, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        vb = pulp.lpSum(lp_vars[(emilio_padilha, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        prob += va1 + vb <= 1
        prob += va2 + vb <= 1

        va1 = pulp.lpSum(lp_vars[(priscila, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        va2 = pulp.lpSum(lp_vars[(guilherme, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        vb = pulp.lpSum(lp_vars[(priscila_guilherme, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        prob += va1 + vb <= 1
        prob += va2 + vb <= 1
        va1 = pulp.lpSum(lp_vars[(emilio, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        va2 = pulp.lpSum(lp_vars[(padilha, c, a, sem)] \
                for c in courses \
                for sem in semesters )
        vb = pulp.lpSum(lp_vars[(emilio_padilha, c, b, sem)] \
                for c in courses \
                for sem in semesters )
        prob += va1 + vb <= 1
        prob += va2 + vb <= 1

    # avoid back-to-back class blocks
    for p in professors:
        if not p.faixa:
            for (a, b) in evitar:
                va = pulp.lpSum(lp_vars[(p, c, a, sem)] \
                        for c in p.courses \
                        for sem in semesters )
                vb = pulp.lpSum(lp_vars[(p, c, b, sem)] \
                        for c in p.courses \
                        for sem in semesters )
                prob += va + vb <= 1

    prob.solve()
    print("Status:", pulp.LpStatus[prob.status])

    #for v in prob.variables():
    #    if v.varValue and v.varValue > 0:
    #        print(v.name, "=", v.varValue)

    def get_slot(s_, sem_):
        for (p, c, s, sem), v in lp_vars.items():
            if sem_ is sem and s is s_ and pulp.value(v) > 0:  #.varValue > 0:
                return lp_vars_rev[v.name]

    #print("\n\n\n")

    def print_m_(ms, sem):

        linha = []
        for s in ms:
            x = get_slot(s, sem)
            if x:
                label = "%s %s" % (x[0].name, x[1].name)
            else:
                label = "()"
            linha.append(label.center(16))

        print(",".join(linha))

    for sem in semesters:
        print("\n\n", sem)
        print("7:30, ", end=' ')
        print_m_(m1s, sem)
        print("10:10,", end=' ')
        print_m_(m2s, sem)
        print("13:30,", end=' ')
        print_m_(t1s, sem)
        print("16:10,", end=' ')
        print_m_(t2s, sem)
        print("19:10,", end=' ')
        print_m_(n1s, sem)
        print("21:00,", end=' ')
        print_m_(n2s, sem)

    print("Total Value:", pulp.value(prob.objective))

    print("\nCCR c/ carga horário insuficiente:")
    for c in cs:
        v = pulp.lpSum(lp_vars[(p, c, s, sem)] \
                for s in slots \
                for p in ps \
                for sem in semesters)
        if pulp.value(v) < 2:
            print("CCR:", c.name, "HS:", pulp.value(v))
예제 #59
0
                                     lowBound=0,
                                     cat="Integer")
        else:
            continue

# Model
model = LpProblem("Maximum_flow_problem", LpMaximize)

# Goal function
goal_function: List = []
for x in var.keys():
    if x[0] == source:
        goal_function.append(var[x])

model += lpSum(goal_function)

# Constraints
constrains_o: List = []
constrains_d: List = []

for node in nodes:
    for x in var.keys():

        if node == x[0]:
            constrains_o.append(var[x])
        elif node == x[1]:
            constrains_d.append(var[x])
        else:
            continue
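
The snippet ends before the conservation constraints are added (note also that `constrains_o`/`constrains_d` accumulate across all nodes instead of being reset per node). A hedged sketch of the intended flow-conservation step, assuming `var` maps `(origin, destination)` arcs to variables and that a `sink` name exists alongside `source`:

# Hypothetical completion: conservation of flow at intermediate nodes.
for node in nodes:
    if node in (source, sink):
        continue
    outflow = lpSum(var[a] for a in var if a[0] == node)
    inflow = lpSum(var[a] for a in var if a[1] == node)
    model += outflow == inflow, f"conservation_{node}"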
예제 #60
0
    def recur_solve_debug(self, temp_leximin, leximin_counts, assigned_agents,
            updated_capacities, return_assignment=False):
        next_c_star = pulp.LpVariable(
            'next_leximin', 0, temp_leximin#, cat='Integer'
        )

        x = pulp.LpVariable.dicts(
            'assignment',
            [(agent_id, intv_id)
            for agent_id in range(self.n_agents)
            for intv_id in range(self.n_intvs)
            if agent_id not in assigned_agents],
            cat='Binary'
        )

        prob = pulp.LpProblem()
        prob += next_c_star

        # Assignment constraint
        for agent_id in range(self.n_agents):
            if agent_id not in assigned_agents:
                prob += pulp.lpSum(
                    x[(agent_id, intv_id)]
                    for intv_id in range(self.n_intvs)
                ) == 1

        # Capacity constraints
        for intv_id in range(self.n_intvs):
            prob += pulp.lpSum(
                x[(agent_id, intv_id)]
                for agent_id in range(self.n_agents)
                if agent_id not in assigned_agents
            ) <= updated_capacities[intv_id]

        # Leximin count constraints
        # fixed allocations do not contribute non-unique leximin values to this dict.
        for leximin in leximin_counts:
            prob += pulp.lpSum(
                x[(agent_id, intv_id)]
                for agent_id in range(self.n_agents)
                for intv_id in range(self.n_intvs)
                if agent_id not in assigned_agents and self.cost_matrix[agent_id, intv_id] == leximin
            ) == leximin_counts[leximin]

        # Constraints for next leximin
        for agent_id in range(self.n_agents):
            if agent_id not in assigned_agents:
                for intv_id in range(self.n_intvs):
                    temp_cost = self.cost_matrix[agent_id, intv_id]

                    if temp_cost not in leximin_counts:
                        prob += x[(agent_id, intv_id)] * temp_cost \
                            <= next_c_star

        status = prob.solve(solver=pulp.solvers.GUROBI_CMD())

        if pulp.LpStatus[status] == 'Optimal':
            if return_assignment:
                assignments = np.zeros((self.n_agents,), dtype=int)

                for agent_id in range(self.n_agents):
                    if agent_id not in assigned_agents:
                        for intv_id in range(self.n_intvs):
                            if x[(agent_id, intv_id)].varValue == 1:
                                assignments[agent_id] = intv_id

                return next_c_star.varValue, assignments

            return next_c_star.varValue

        return False