Example #1
def get_solver():
    global solver
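    # Reuse the module-level solver if an earlier call already built it;
    # the NameError branch below only runs on the first call.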
    try:
        return solver
    except NameError: pass

    solver = GLPK(None, msg=False, options=['--cuts'])
    if solver.available():
        print("Using solver from:", solver.path)
        return solver

    # There may be no glpsol. Let PuLP try to find another solver.
    print("Couldn't find 'glpsol' solver; a default may be found")
    solver = None
Example #2
def mclp_problem_solved(binary_coverage, binary_coverage2):
    problem = Problem.mclp([binary_coverage, binary_coverage2],
                           max_supply={
                               binary_coverage: 5,
                               binary_coverage2: 10
                           })
    problem.solve(GLPK())
    return problem
Example #3
def solve_case(args, number_of_edges):
    global_start_time = time.time()
    variables = initialize_variables(number_of_edges)

    # TODO: Subtract repeater/re-encoder costs

    constraints = get_constraints(variables)

    if args.debug:
        for i in range(len(constraints)):
            constraints[i] += debug()

    prob_type = LpMinimize if args.debug else LpMaximize
    prob = LpProblem("interkontinental-asymmetric", prob_type)
    objective = get_objective(variables, number_of_edges) if not args.debug \
        else sum(_debug_vars)
    logger.info('Objective: %s %s', LpSenses[prob.sense], objective)
    prob += objective

    for constraint in constraints:
        prob += constraint

    starttime = time.time()
    pipe = io.StringIO()
    res = GLPK(pipe=pipe).solve(prob)
    endtime = time.time()

    output = pipe.getvalue()
    memstart = output.find('Memory used')

    for commodity in commodities():
        print('K%d: %s -> %s' % (commodity, commodity.sender,
                                 commodity.receiver))

    if res < 0:
        print('Unsolvable!')
        sys.exit(1)
    else:
        if args.debug:
            print_problem_constraints(prob)

        dump_nonzero_variables(prob)
        print()

        print_solution(variables)
        print_bandwidth_usage(variables)

        print("Score =", value(prob.objective))
        print('Found solution in %.3fs' % (endtime - starttime))
        with open('results.csv', 'a') as fh:
            memory_segment = output[memstart:memstart + 20]
            memory_used = float(memory_segment.split()[2]) * 10**6
            solve_time = endtime - starttime
            build_time = starttime - global_start_time
            retrace_time = time.time() - endtime
            fh.write('%.3f,%.3f,%.3f,%d' %
                     (solve_time, build_time, retrace_time, memory_used) +
                     '\n')
Example #4
    def _formulate_and_solve(self, env_tick: int, init_inventory: np.ndarray,
                             demand: np.ndarray, supply: np.ndarray):
        problem = LpProblem(
            name=f"Citi_Bike_Repositioning_from_tick_{env_tick}",
            sense=LpMaximize,
        )
        self._init_variables(init_inventory=init_inventory)
        self._add_constraints(problem=problem, demand=demand, supply=supply)
        self._set_objective(problem=problem)
        problem.solve(GLPK(msg=0))
Example #5
def parseRequestBody(bodyString):

    parsedJson = json.loads(bodyString)

    function = parsedJson['function']
    constraints = parsedJson['restrictions']

    objective = LpMaximize if function['operation'].upper() == 'MAX' else LpMinimize
    problem = LpProblem("Problem", objective)
    function_coefficients = function['coefficients']

    problem_vars = [
        LpVariable("x" + str(x + 1), 0, cat="Integer")
        for x in range(0, len(function_coefficients))
    ]

    problem += lpSum([
        function_coefficients[i] * problem_vars[i]
        for i in range(0, len(function_coefficients))
    ])

    for restriction in constraints:

        coefficients = restriction['coefficients']
        sense = restriction['type']
        bound = restriction['value']

        if sense.upper() == 'LT':
            problem += lpSum([
                problem_vars[i] * coefficients[i]
                for i in range(0, len(coefficients))
            ]) <= bound
        elif sense.upper() == 'EQ':
            problem += lpSum([
                problem_vars[i] * coefficients[i]
                for i in range(0, len(coefficients))
            ]) == bound
        elif sense.upper() == 'GT':
            problem += lpSum([
                problem_vars[i] * coefficients[i]
                for i in range(0, len(coefficients))
            ]) >= bound

    status = problem.solve(GLPK())

    response = {
        'status': LpStatus[status].upper(),
        'values':
        [value(problem.objective)] + [value(var) for var in problem_vars]
    }

    return response
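For reference, a request body that parseRequestBody would accept might look like the following (hypothetical values; the shape is inferred from the keys the function reads):

import json

body = json.dumps({
    "function": {"operation": "MAX", "coefficients": [3, 5]},
    "restrictions": [
        {"type": "LT", "coefficients": [1, 0], "value": 4},
        {"type": "LT", "coefficients": [0, 2], "value": 12},
    ],
})
response = parseRequestBody(body)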
Example #6
def main():
    """
    load the file resulted from the naive algorithm. The file loaded contains 3 fields in each line
    The first field is the adjacency matrix of 15 nodes graph in a format of 1 array of 225 cells of zeros and ones.
    The second field contains 1 array of the optimal solution. This array consists of 15 cells of zeros and ones where
     a one at a place i means that node i is in the vertex cover. The third field is an integer indicating the size of
     the optimal vertex cover.
    :return: nothing
    """
    df = pd.read_csv('linkedGraphsWithSolutions2.csv', engine='python')
    # n_nodes is the size of the graph; it can be changed to any size
    n_nodes = 15
    model = LpProblem(name="find-minimum-vertex-cover", sense=LpMinimize)
    x = {
        i: LpVariable(name=f"x{i}", lowBound=0, cat="Binary")
        for i in range(0, n_nodes)
    }
    model += lpSum(x.values())
    file = open(
        "size_of_minimum_vertex_cover_from_linear_programing_algorithm_y", "w")
    count_diffs = 0
    # since df.size includes 3 fields for each line, the number of lines is df.size/3
    num_of_graphs = int(df.size / 3)
    for k in range(num_of_graphs):
        # clear all constraints before adding the constraints for a new graph
        model.constraints.clear()
        # use json.loads to convert the string form of the adjacency matrix to an array
        e = json.loads(df.iloc[k][0])
        # res contains the size of the minimum vertex cover of the graph, as given by the naive algorithm
        res = df.iloc[k][2]
        # for every edge between xi and xj, add the constraint xi + xj >= 1 to the model
        for i in range(0, n_nodes):
            for j in range(0, n_nodes):
                if i < j and e[i * n_nodes + j] == 1:
                    model += (x[i] + x[j] >= 1,
                              "edge " + str(i) + "--" + str(j))
                    print("b", str(i) + str(j))
        # run GLPK on the model
        model.solve(solver=GLPK(msg=True))
        # print the results of the GLPK run
        print(f"status: {model.status}, {LpStatus[model.status]}")
        print(f"objective: {model.objective.value()}")
        for var in model.variables():
            print(f"{var.name}: {var.value()}")
        for name, constraint in model.constraints.items():
            print(f"{name}: {constraint.value()}")
        if model.objective.value() != res:
            print("difference at line ", k)
            count_diffs += 1
        file.write(str(model.objective.value()) + "\n")
    print("the total number of differences is: ", count_diffs)
    return 0
Example #7
def get_solver():
    global solver
    try:
        return solver
    except NameError: pass
    
    # Windows: try to find glpsol in pulp\solverdir\glpsol.exe
    paths = [here('pulp', 'solverdir', 'glpsol.exe')]
    # Try to find glpsol in PATH
    path = distutils.spawn.find_executable('glpsol')
    if path:
        paths.append(os.path.abspath(path))
    
    for path in paths:
        solver = GLPK(path, msg=0, options=['--cuts'])
        if solver.available():
            print("Using GLPK:", path)
            return solver
    
    # There may be no glpsol. Let PuLP try to find a solver.
    print("No solver; a default may be found")
    solver = None
Example #8
def pulp_solver(G, h, A, b, c, n):
    # First, create a variable for each of the columns of G and A.
    #
    # pre-condition: G and A have the same number of columns.
    #
    # The second argument specifies a lower bound for the variable, so we can
    # safely ignore the inequality constraints given by G and h.
    variables = [LpVariable('s{}'.format(i), 0) for i in range(G.shape[1])]
    # LpVariable has a second argument that allows you to specify a lower bound
    # for the variable (for example, x1 >= 0). We don't specify nonnegativity
    # here, because it is already specified by the inequality constraints G and
    # h.
    #variables = [LpVariable('s{}'.format(i)) for i in range(G.shape[1])]
    # Next, create a problem context object and add the objective function c to
    # it. The first object added to LpProblem is implicitly interpreted as the
    # objective function.
    problem = LpProblem('fraciso', LpMinimize)
    # The np.dot() function doesn't like mixing numbers and LpVariable objects,
    # so we compute the dot product ourselves.
    #
    #problem += np.dot(variables, c), 'Dummy objective function'
    problem += _pulp_dot_product(c, variables), 'Dummy objective function'
    # Add each equality constraint to the problem context.
    for i, (row, b_value) in enumerate(zip(A, b)):
        #problem += np.dot(row, variables), 'Constraint {}'.format(i)
        # Convert the row to a list so pulp has an easier time dealing with it.
        row_as_list = np.asarray(row).flatten().tolist()
        dot_product = _pulp_dot_product(row_as_list, variables)
        problem += dot_product == b_value, 'Constraint {}'.format(i)
    solver_backend = GLPK()
    #solver_backend = COIN()
    problem.solve(solver_backend)
    if problem.status == LpStatusOptimal:
        # PuLP is silly and sorts the variables by name before returning them,
        # so we need to re-sort them in numerical order.
        solution = [
            s.varValue
            for s in sorted(problem.variables(), key=lambda s: int(s.name[1:]))
        ]
        return True, solution
    # TODO status could be unknown here, but we're currently ignoring that
    return False, None
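The helper _pulp_dot_product used above is not shown in this example; a minimal sketch consistent with how it is called (a plain sum of coefficient * variable terms, sidestepping np.dot) might be:

def _pulp_dot_product(coefficients, variables):
    # Build the affine expression term by term; np.dot does not mix plain
    # numbers with LpVariable objects cleanly.
    return lpSum(c * v for c, v in zip(coefficients, variables))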
Example #9
    def __init__(self, config: DottableDict, pm_capacity: List[IlpPmCapacity],
                 logger: Logger, log_path: str):
        self._logger = logger
        self._log_path = log_path

        self._pm_capacity = pm_capacity
        self._pm_num = len(self._pm_capacity)

        # LP solver.
        msg = 1 if config.log.stdout_solver_message else 0
        if config.solver == "GLPK":
            self._solver = GLPK(msg=msg)
        elif config.solver == "CBC":
            self._solver = PULP_CBC_CMD(msg=msg)
        else:
            print(
                f"Solver {config.solver} not added in ILP, choose from [GLPK, CBC]"
            )
            exit(0)

        # For formulation and action application.
        self.plan_window_size = config.plan_window_size
        self.apply_buffer_size = config.apply_buffer_size

        # For performance.
        self.core_upper_ratio = 1 - config.performance.core_safety_remaining_ratio
        self.mem_upper_ratio = 1 - config.performance.mem_safety_remaining_ratio

        # For objective.
        self.successful_allocation_decay = config.objective.successful_allocation_decay
        self.allocation_multiple_core_num = config.objective.allocation_multiple_core_num

        # For logger.
        self.dump_all_solution = config.log.dump_all_solution
        self.dump_infeasible_solution = config.log.dump_infeasible_solution

        # For problem formulation and application
        self.last_solution_env_tick = -1
Example #10
from pulp import GLPK, LpMaximize, LpProblem, LpStatus, LpVariable, lpSum

# Create the model
model = LpProblem(name="Wyndor_Glass_Co", sense=LpMaximize)

# Initialize the decision variables
x1 = LpVariable(name="x1", lowBound=0)
x2 = LpVariable(name="x2", lowBound=0)

# Add the constraints to the model
model += (x1 <= 4, "Plant 1")
model += (2*x2 <= 12, "Plant 2")
model += (3*x1 + 2*x2 <= 18, "Plant 3")

# Add the objective function to the model
model += lpSum([3*x1, 5*x2])

# Solve the problem
status = model.solve(solver=GLPK(msg=False))

print(f"Status: {model.status}, {LpStatus[model.status]}")
print(f"Objective: {model.objective.value()}")

for var in model.variables():
    print(f"{var.name}: {var.value()}")
    
for name, constraint in model.constraints.items():
    print(f"{name}: {constraint.value()}")
    
print(model.variables())
print(model.variables()[0] is x1)
print(model.variables()[1] is x2)
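# Both identity checks above print True: model.variables() returns the same
# LpVariable objects that were added to the model, sorted by name.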
print(model.solver)
Example #11
    def optimal_routing_delay(self, tm_idx):
        assert tm_idx in self.load_multiplier, (tm_idx)
        tm = self.traffic_matrices[tm_idx] * self.load_multiplier[tm_idx]
        demands = {}
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            demands[i] = tm[s][d]

        model = LpProblem(name="routing")

        ratio = LpVariable.dicts(name="ratio",
                                 indexs=self.pair_links,
                                 lowBound=0,
                                 upBound=1)

        link_load = LpVariable.dicts(name="link_load", indexs=self.links)

        f = LpVariable.dicts(name="link_cost", indexs=self.links)

        for pr in self.lp_pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]
            ]) == -1, "flow_conservation_constr1_%d" % pr)

        for pr in self.lp_pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]
            ]) == 1, "flow_conservation_constr2_%d" % pr)

        for pr in self.lp_pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[1] == n
                    ]) - lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[0] == n
                    ]) == 0, "flow_conservation_constr3_%d_%d" % (pr, n))

        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            model += (link_load[ei] == lpSum([
                demands[pr] * ratio[pr, e[0], e[1]] for pr in self.lp_pairs
            ]), "link_load_constr%d" % ei)
            model += (f[ei] * self.link_capacities[ei] >= link_load[ei],
                      "cost_constr1_%d" % ei)
            model += (f[ei] >=
                      3 * link_load[ei] / self.link_capacities[ei] - 2 / 3,
                      "cost_constr2_%d" % ei)
            model += (f[ei] >=
                      10 * link_load[ei] / self.link_capacities[ei] - 16 / 3,
                      "cost_constr3_%d" % ei)
            model += (f[ei] >=
                      70 * link_load[ei] / self.link_capacities[ei] - 178 / 3,
                      "cost_constr4_%d" % ei)
            model += (f[ei] >= 500 * link_load[ei] / self.link_capacities[ei] -
                      1468 / 3, "cost_constr5_%d" % ei)
            model += (
                f[ei] >=
                5000 * link_load[ei] / self.link_capacities[ei] - 16318 / 3,
                "cost_constr6_%d" % ei)

        model += lpSum(f[ei] for ei in self.links)

        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'

        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()

        return solution
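The six cost_constr inequalities appear to encode the convex piecewise-linear link cost popularized by Fortz and Thorup: writing u = link_load / capacity for the utilization, each constraint supplies one linear piece, so minimizing the sum of f[ei] pushes each f to f(u) = max(u, 3u - 2/3, 10u - 16/3, 70u - 178/3, 500u - 1468/3, 5000u - 16318/3).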
Example #12
    def optimal_routing_mlu_critical_pairs(self, tm_idx, critical_pairs):
        tm = self.traffic_matrices[tm_idx]

        pairs = critical_pairs

        demands = {}
        background_link_loads = np.zeros((self.num_links))
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            #background link load
            if i not in critical_pairs:
                self.ecmp_next_hop_distribution(background_link_loads,
                                                tm[s][d], s, d)
            else:
                demands[i] = tm[s][d]

        model = LpProblem(name="routing")

        pair_links = [(pr, e[0], e[1]) for pr in pairs for e in self.lp_links]
        ratio = LpVariable.dicts(name="ratio",
                                 indexs=pair_links,
                                 lowBound=0,
                                 upBound=1)

        link_load = LpVariable.dicts(name="link_load", indexs=self.links)

        r = LpVariable(name="congestion_ratio")

        for pr in pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]
            ]) == -1, "flow_conservation_constr1_%d" % pr)

        for pr in pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]
            ]) == 1, "flow_conservation_constr2_%d" % pr)

        for pr in pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[1] == n
                    ]) - lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[0] == n
                    ]) == 0, "flow_conservation_constr3_%d_%d" % (pr, n))

        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            model += (
                link_load[ei] == background_link_loads[ei] +
                lpSum([demands[pr] * ratio[pr, e[0], e[1]]
                       for pr in pairs]), "link_load_constr%d" % ei)
            model += (link_load[ei] <= self.link_capacities[ei] * r,
                      "congestion_ratio_constr%d" % ei)

        model += r + OBJ_EPSILON * lpSum([link_load[ei] for ei in self.links])

        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'

        obj_r = r.value()
        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()

        return obj_r, solution
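In this formulation r is the congestion ratio (the maximum link utilization), since every link load is constrained to be at most link_capacity * r; the OBJ_EPSILON-weighted sum of link loads only breaks ties between solutions with the same ratio.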
Example #13
    def test_solver(self, binary_lscp_pulp_problem, binary_coverage):
        p = Problem(binary_lscp_pulp_problem, binary_coverage, 'lscp')
        p = p.solve(GLPK())
        assert isinstance(p, Problem)
Example #14
    def _selectSentences(self, wordlimit):
        """
        Optimally selects the sentences based on their bigrams
        """
        fullsentences = [
            removePOS(sentence) for cluster in self.candidates
            for sentence in cluster
        ]

        # remember the correspondence between a sentence and its cluster
        clusternums = {}
        sentencenums = {s: i for i, s in enumerate(fullsentences)}
        for i, cluster in enumerate(self.candidates):
            clusternums[i] = []
            for sentence in cluster:
                fullsentence = removePOS(sentence)
                if fullsentence in sentencenums:
                    clusternums[i].append(sentencenums[removePOS(sentence)])

        # extract bigrams for all sentences
        bigramssentences = [
            extractBigrams(sentence.split()) for sentence in fullsentences
        ]

        # get unique bigrams
        uniqbigrams = set(bigram for sentence in bigramssentences
                          for bigram in sentence)
        numbigrams = len(uniqbigrams)
        numsentences = len(fullsentences)

        # rewrite fullsentences
        fullsentences = [wellformatize(sentence) for sentence in fullsentences]

        # filter out rare bigrams
        weightedbigrams = {
            bigram: (count if count >= self.minbigramcount else 0)
            for bigram, count in self.bigramstats.items()
        }

        problem = pulp.LpProblem("Sentence selection", pulp.LpMaximize)

        # concept variables
        concepts = pulp.LpVariable.dicts(name='c',
                                         indexs=range(numbigrams),
                                         lowBound=0,
                                         upBound=1,
                                         cat='Integer')
        sentences = pulp.LpVariable.dicts(name='s',
                                          indexs=range(numsentences),
                                          lowBound=0,
                                          upBound=1,
                                          cat='Integer')

        # objective : maximize wi * ci (weighti * concepti)
        # small hack. If the bigram has been filtered out from uniqbigrams,
        # we give it a weight of 0.
        problem += sum([(weightedbigrams.get(bigram) or 0) * concepts[i]
                        for i, bigram in enumerate(uniqbigrams)])

        # constraints

        # size
        problem += sum([
            sentences[j] * len(fullsentences[j].split())
            for j in range(numsentences)
        ]) <= wordlimit

        # integrity constraints (link between concepts and sentences)
        for j, bsentence in enumerate(bigramssentences):
            for i, bigram in enumerate(uniqbigrams):
                if bigram in bsentence:
                    problem += sentences[j] <= concepts[i]

        for i, bigram in enumerate(uniqbigrams):
            problem += sum([
                sentences[j] for j, bsentence in enumerate(bigramssentences)
                if bigram in bsentence
            ]) >= concepts[i]

        # select only one sentence per cluster
        for clusternum, clustersentences in clusternums.items():
            problem += sum([sentences[j] for j in clustersentences]) <= 1

        # solve the problem
        problem.solve(GLPK(msg=0))

        summary = []
        # get the sentences back
        for j in range(numsentences):
            if sentences[j].varValue == 1:
                summary.append(fullsentences[j])

        return summary
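This appears to follow the concept-based ILP of Gillick and Favre: binary concept (bigram) variables weighted by their counts are maximized under a word-limit knapsack constraint, the integrity constraints tie each concept to the sentences that contain it, and the final constraint family selects at most one sentence per cluster.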
Example #15
from pulp import GLPK
from pulp import PULP_CBC_CMD

logger = daiquiri.getLogger(__name__)

solvers = {
    'pulp_cbc_cmd': {
        'function': scheduler.solution,
        'kwargs': {
            'solver': PULP_CBC_CMD(msg=False)
        }
    },
    'glpk': {
        'function': scheduler.solution,
        'kwargs': {
            'solver': GLPK(msg=False)
        }
    },
    'hill_climber': {
        'function': scheduler.heuristic,
        'kwargs': {
            'algorithm': hill_climber
        }
    },
    'simulated_annealing': {
        'function': scheduler.heuristic,
        'kwargs': {
            'algorithm': simulated_annealing
        }
    }
}
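A caller can dispatch through this table without knowing which backend was configured; a minimal sketch, assuming each entry's function accepts the scheduling arguments plus its stored kwargs:

def solve_with(name, *args, **overrides):
    entry = solvers[name]
    kwargs = {**entry['kwargs'], **overrides}
    return entry['function'](*args, **kwargs)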
Example #16
        problem += delta[5][i] * N <= lpSum(V[s][a][i]
                                            for a in range(0, s - 1))
        problem += delta[1][i] + delta[2][i] <= 1
        problem += delta[1][i] + delta[3][i] <= 1
        problem += delta[4][i] + delta[5][i] <= 1

# Objective function ##############

problem += costs.maintainance(lpSum(c[i][s] for i in [0, 1]
                                    for s in semesters)) + \
           costs.salary(v_moy, na, n1, n2, semesters, cities_number, road_distances1, road_distances2) + \
           costs.fuel(fuel_price, conso, n1, n2, na, road_distances1, road_distances2, cities_number, semesters) + \
           costs.buying_trucks(c) + \
           costs.amortissement(V, semester_number)

problem.solve(solver=GLPK(msg=True, keepFiles=True, timeLimit=30))

data = problem.toDict()


class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpEncoder, self).default(obj)
Example #17
    def _selectSentences(self, wordlimit):

        fullsentences = list(
            set([removePOS(sentence) for sentence in self.candidates]))

        # extract bigrams for all sentences
        bigramssentences = [
            extractBigrams(sentence.split()) for sentence in fullsentences
        ]

        # get unique bigrams
        uniqbigrams = set(bigram for sentence in bigramssentences
                          for bigram in sentence)
        numbigrams = len(uniqbigrams)

        # rewrite fullsentences

        fullsentences = [wellformatize(sentence) for sentence in fullsentences]

        numsentences = len(fullsentences)

        # filter out rare bigrams
        weightedbigrams = {
            bigram: sum([0.8**i for i in range(count)])
            for bigram, count in self.bigramstats.items()
        }

        problem = pulp.LpProblem("Sentence selection", pulp.LpMaximize)

        # concept variables
        concepts = pulp.LpVariable.dicts(name='c',
                                         indexs=range(numbigrams),
                                         lowBound=0,
                                         upBound=1,
                                         cat='Integer')

        sentences = pulp.LpVariable.dicts(name='s',
                                          indexs=range(numsentences),
                                          lowBound=0,
                                          upBound=1,
                                          cat='Integer')

        # objective : maximize wi * ci (weighti * concepti)
        # small hack. If the bigram has been filtered out from uniqbigrams,
        # we give it a weight of 0.
        problem += sum([(weightedbigrams.get(bigram) or 0) * concepts[i]
                        for i, bigram in enumerate(uniqbigrams)])

        # constraints

        # size
        problem += sum([
            sentences[j] * len(fullsentences[j].split())
            for j in range(numsentences)
        ]) <= wordlimit

        # integrity constraints (link between concepts and sentences)
        for j, bsentence in enumerate(bigramssentences):
            for i, bigram in enumerate(uniqbigrams):
                if bigram in bsentence:
                    problem += sentences[j] <= concepts[i]

        for i, bigram in enumerate(uniqbigrams):
            problem += sum([
                sentences[j] for j, bsentence in enumerate(bigramssentences)
                if bigram in bsentence
            ]) >= concepts[i]

        # solve the problem
        problem.solve(GLPK())

        summary = []
        # get the sentences back
        for j in range(numsentences):
            if sentences[j].varValue == 1:
                summary.append(fullsentences[j])

        return summary
Example #18
from pulp import GLPK, LpMaximize, LpMinimize, LpProblem, LpStatus, LpVariable, value

x1 = LpVariable("x1", 0, None)  # x1>=0
x2 = LpVariable("x2", 0, None)  # x2>=0
x3 = LpVariable("x3", 0, None)  # x3>=0
x4 = LpVariable("x4", 0, None)  # x4>=0

# defines the problem
prob = LpProblem("problem", LpMaximize)

# defines the constraints
prob += x1 + x2 + x3 + x4 <= 300
prob += x1 + 2 * x2 + 3 * x3 + x4 <= 360

# defines the objective function to maximize
prob += 10 * x1 + 15 * x2 + 10 * x3 + 5 * x4

# solve the problem
status = prob.solve(GLPK(msg=0))
LpStatus[status]

# print the results
print("Pulp Solution for x1, x2, x3 and x4")
print(value(x1))
print(value(x2))
print(value(x3))
print(value(x4))
"""
Minimize w=22y1+44y2+33y3
Subject to:
y1+2y2+y3≥3
y1+y3≥3
3y1+2y2+2y3≥8
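The snippet is cut off after stating this second program; a minimal PuLP sketch that solves it, mirroring the style of the primal above, might be:

y1 = LpVariable("y1", 0, None)  # y1>=0
y2 = LpVariable("y2", 0, None)  # y2>=0
y3 = LpVariable("y3", 0, None)  # y3>=0

dual = LpProblem("dual_problem", LpMinimize)

# constraints from the statement above
dual += y1 + 2 * y2 + y3 >= 3
dual += y1 + y3 >= 3
dual += 3 * y1 + 2 * y2 + 2 * y3 >= 8

# objective: minimize w = 22*y1 + 44*y2 + 33*y3
dual += 22 * y1 + 44 * y2 + 33 * y3

status = dual.solve(GLPK(msg=0))
print(LpStatus[status], value(dual.objective))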
Example #19
    for c2 in range(max_trucks_type1 + 1, max_trucks_type2):
        model += pos[c2][s] <= pos[c2 - 1][s]

print("Initialisation terminée")

model += n * costs.salary(x, y, distances, v_moy, max_trucks_type1) + \
         costs.maintainance(sum(pos[c][s] for c in range(max_trucks) for s in semesters)) + \
         n * costs.fuel(x, y, distances, max_trucks_type1) + \
         costs.buying_trucks(A, semesters, max_trucks_type1, max_trucks_type2) - \
         costs.selling_trucks(V, semesters, max_trucks_type1, max_trucks_type2, selling_cost), 'Objective Function '

input("Press enter")
print("Solving")

#status = model.solve(solver=GLPK(msg=True, keepFiles=True))
status = model.solve(solver=GLPK(msg=True, keepFiles=True, timeLimit=3600*13))

data = model.toDict()


class NpEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpEncoder, self).default(obj)