def test_change_of_objective_is_reflected_in_low_level_solver(self):
        x = Variable('x', lb=-83.3, ub=1324422.)
        y = Variable('y', lb=-181133.3, ub=12000.)
        objective = Objective(0.3 * x + 0.4 * y, name='test', direction='max')
        self.model.objective = objective

        self.assertEqual((self.model.objective.expression -
                          (0.4 * y + 0.3 * x)).expand() - 0, 0)
        self.assertEqual(self.model.objective.direction, "max")

        self.assertEqual(glp_get_obj_coef(self.model.problem, x._index), 0.3)
        self.assertEqual(glp_get_obj_coef(self.model.problem, y._index), 0.4)
        for i in range(1, glp_get_num_cols(self.model.problem) + 1):
            if i != x._index and i != y._index:
                self.assertEqual(glp_get_obj_coef(self.model.problem, i), 0)
        z = Variable('z', lb=4, ub=4, type='integer')
        self.model.objective += 77. * z

        self.assertEqual((self.model.objective.expression -
                          (0.4 * y + 0.3 * x + 77.0 * z)).expand() - 0, 0)
        self.assertEqual(self.model.objective.direction, "max")

        self.assertEqual(glp_get_obj_coef(self.model.problem, x._index), 0.3)
        self.assertEqual(glp_get_obj_coef(self.model.problem, y._index), 0.4)
        self.assertEqual(glp_get_obj_coef(self.model.problem, z._index), 77.)
        for i in range(1, glp_get_num_cols(self.model.problem) + 1):
            if i != x._index and i != y._index and i != z._index:
                self.assertEqual(glp_get_obj_coef(self.model.problem, i), 0)
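The assertions above compare against swiglpk's raw GLPK bindings. As a point of reference, here is a minimal sketch of that low-level API on a bare problem object, assuming swiglpk is installed; the column names and bounds are illustrative, not taken from the test fixture.

# Minimal sketch of the raw swiglpk calls the test asserts against.
# Assumes swiglpk is installed; names and bounds are illustrative only.
from swiglpk import (GLP_DB, GLP_MAX, glp_add_cols, glp_create_prob,
                     glp_get_obj_coef, glp_set_col_bnds, glp_set_col_name,
                     glp_set_obj_coef, glp_set_obj_dir)

prob = glp_create_prob()
glp_add_cols(prob, 2)                                # GLPK columns are 1-indexed
glp_set_col_name(prob, 1, "x")
glp_set_col_bnds(prob, 1, GLP_DB, -83.3, 1324422.)   # double-bounded column
glp_set_col_name(prob, 2, "y")
glp_set_col_bnds(prob, 2, GLP_DB, -181133.3, 12000.)

glp_set_obj_dir(prob, GLP_MAX)                       # maximize, as in the test
glp_set_obj_coef(prob, 1, 0.3)
glp_set_obj_coef(prob, 2, 0.4)

assert glp_get_obj_coef(prob, 1) == 0.3
assert glp_get_obj_coef(prob, 2) == 0.4
# Index 0 holds the objective's constant term, which is why the tests further
# down see a leading 0.0 when their loop starts at i = 0.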
Example 2
    def test_change_of_objective_is_reflected_in_low_level_solver(self):
        x = self.interface.Variable('x', lb=-83.3, ub=1324422.)
        y = self.interface.Variable('y', lb=-181133.3, ub=12000.)
        objective = self.interface.Objective(0.3 * x + 0.4 * y, name='test', direction='max')
        self.model.objective = objective

        self.assertEqual(
            (self.model.objective.expression - (0.4 * y + 0.3 * x)).expand() - 0, 0
        )
        self.assertEqual(self.model.objective.direction, "max")

        self.assertEqual(glp_get_obj_coef(self.model.problem, x._index), 0.3)
        self.assertEqual(glp_get_obj_coef(self.model.problem, y._index), 0.4)
        for i in range(1, glp_get_num_cols(self.model.problem) + 1):
            if i != x._index and i != y._index:
                self.assertEqual(glp_get_obj_coef(self.model.problem, i), 0)
        z = self.interface.Variable('z', lb=4, ub=4, type='integer')
        self.model.objective += 77. * z

        self.assertEqual(
            (self.model.objective.expression - (0.4 * y + 0.3 * x + 77.0 * z)).expand() - 0, 0
        )
        self.assertEqual(self.model.objective.direction, "max")

        self.assertEqual(glp_get_obj_coef(self.model.problem, x._index), 0.3)
        self.assertEqual(glp_get_obj_coef(self.model.problem, y._index), 0.4)
        self.assertEqual(glp_get_obj_coef(self.model.problem, z._index), 77.)
        for i in range(1, glp_get_num_cols(self.model.problem) + 1):
            if i != x._index and i != y._index and i != z._index:
                self.assertEqual(glp_get_obj_coef(self.model.problem, i), 0)
Example 3
 def get_linear_coefficients(self, variables):
     if self.problem is not None:
         return {
             var: glp_get_obj_coef(self.problem.problem, var._index)
             for var in variables
         }
     else:
         raise Exception(
             "Can't get coefficients from solver if objective is not in a model"
         )
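A hedged usage sketch of get_linear_coefficients through optlang's GLPK interface; assumes optlang with its GLPK backend is installed, and the model and variable names are illustrative rather than taken from the source above.

# Sketch only: how get_linear_coefficients might be called from user code.
# Assumes optlang's GLPK backend; names are illustrative.
from optlang.glpk_interface import Model, Objective, Variable

x = Variable('x', lb=0)
y = Variable('y', lb=0)
model = Model(name='coef_demo')
model.add([x, y])
model.objective = Objective(2.0 * x + 0.5 * y, direction='max')

coefficients = model.objective.get_linear_coefficients([x, y])
print(coefficients)   # expected: {x: 2.0, y: 0.5}, read back via glp_get_obj_coef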
Example 4
 def test_imul_objective(self):
     self.model.objective *= 2.
     obj_coeff = list()
     for i in range(len(self.model.variables)):
         obj_coeff.append(glp_get_obj_coef(self.model.problem, i))
     self.assertEqual(obj_coeff,
                      [0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0]
                      )
Example 5
 def test_iadd_objective(self):
     v2, v3 = self.model.variables.values()[1:3]
     self.model.objective += 2. * v2 - 3. * v3
     obj_coeff = list()
     for i in range(len(self.model.variables)):
         obj_coeff.append(glp_get_obj_coef(self.model.problem, i))
     self.assertEqual(obj_coeff,
                      [0.0, 1.0, 2.0, -3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                       0.0]
                      )
Example 6
 def test_imul_objective(self):
     self.model.objective *= 2.
     obj_coeff = list()
     for i in range(len(self.model.variables)):
         obj_coeff.append(glp_get_obj_coef(self.model.problem, i))
     self.assertEqual(obj_coeff, [
         0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0
     ])
Example 7
 def test_iadd_objective(self):
     v2, v3 = self.model.variables.values()[1:3]
     self.model.objective += 2. * v2 - 3. * v3
     obj_coeff = list()
     for i in range(len(self.model.variables)):
         obj_coeff.append(glp_get_obj_coef(self.model.problem, i))
     self.assertEqual(obj_coeff, [
         0.0, 1.0, 2.0, -3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
         0.0, 0.0, 0.0, 0.0
     ])
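The in-place operators exercised above write straight through to the GLPK objective row. A minimal sketch of the same behaviour at the optlang level, assuming the GLPK backend; the variable and model names are illustrative.

# Sketch of in-place objective arithmetic, as exercised by the tests above.
# Assumes optlang's GLPK backend; names are illustrative.
from swiglpk import glp_get_obj_coef
from optlang.glpk_interface import Model, Objective, Variable

a = Variable('a', lb=0)
b = Variable('b', lb=0)
model = Model(name='inplace_demo')
model.add([a, b])
model.objective = Objective(1.0 * a, direction='min')

model.objective *= 2.        # scales every existing coefficient
model.objective += -3. * b   # introduces b into the objective

assert glp_get_obj_coef(model.problem, a._index) == 2.0
assert glp_get_obj_coef(model.problem, b._index) == -3.0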
Example 8
    def _opt_dir_str(self, zero_print):
        'get the optimization direction line for __str__'

        lp = self.lp
        cols = self.get_num_cols()
        rv = "min"

        for col in range(1, cols + 1):
            val = glpk.glp_get_obj_coef(lp, col)
            num = str(val)

            num = num.rjust(6)[:6]  # fix width to exactly 6

            if val == 0:
                rv += zero_print(num) + " "
            else:
                rv += num + " "

        rv += "\n"

        return rv
Example 9
    def _opt_dir_str(self, zero_print):
        'get the optimization direction line for __str__'

        lp = self.lp
        cols = self.get_num_cols()
        rv = "min"

        for col in range(1, cols + 1):
            val = glpk.glp_get_obj_coef(lp, col)
            num = str(val)

            if len(num) < 6:
                num = (" " * (6 - len(num))) + num
            else:
                num = num[0:6]

            if val == 0:
                rv += zero_print(num) + " "
            else:
                rv += num + " "

        rv += "\n"

        return rv
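The two variants above differ only in how the coefficient string is forced to a width of exactly six characters; str(val).rjust(6)[:6] and the explicit padding/truncation branch are equivalent. A quick standalone check with illustrative values:

# Standalone check that the two padding strategies agree (illustrative values).
def pad_rjust(num):
    return num.rjust(6)[:6]

def pad_manual(num):
    if len(num) < 6:
        return (" " * (6 - len(num))) + num
    return num[0:6]

for val in (0.0, 0.3, -3.0, 1324422.5):
    s = str(val)
    assert pad_rjust(s) == pad_manual(s)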
Example 10
    def _initialize_model_from_problem(self, problem):
        try:
            self.problem = problem
            glp_create_index(self.problem)
        except TypeError:
            raise TypeError("Provided problem is not a valid GLPK model.")
        row_num = glp_get_num_rows(self.problem)
        col_num = glp_get_num_cols(self.problem)
        for i in range(1, col_num + 1):
            var = Variable(
                glp_get_col_name(self.problem, i),
                lb=glp_get_col_lb(self.problem, i),
                ub=glp_get_col_ub(self.problem, i),
                problem=self,
                type=_GLPK_VTYPE_TO_VTYPE[
                    glp_get_col_kind(self.problem, i)]
            )
            # This avoids adding the variable to the glpk problem
            super(Model, self)._add_variables([var])
        variables = self.variables

        for j in range(1, row_num + 1):
            ia = intArray(col_num + 1)
            da = doubleArray(col_num + 1)
            nnz = glp_get_mat_row(self.problem, j, ia, da)
            constraint_variables = [variables[ia[i] - 1] for i in range(1, nnz + 1)]

            # Since constraint expressions are lazily retrieved from the solver they don't have to be built here
            # lhs = _unevaluated_Add(*[da[i] * constraint_variables[i - 1]
            #                         for i in range(1, nnz + 1)])
            lhs = 0

            glpk_row_type = glp_get_row_type(self.problem, j)
            if glpk_row_type == GLP_FX:
                row_lb = glp_get_row_lb(self.problem, j)
                row_ub = row_lb
            elif glpk_row_type == GLP_LO:
                row_lb = glp_get_row_lb(self.problem, j)
                row_ub = None
            elif glpk_row_type == GLP_UP:
                row_lb = None
                row_ub = glp_get_row_ub(self.problem, j)
            elif glpk_row_type == GLP_DB:
                row_lb = glp_get_row_lb(self.problem, j)
                row_ub = glp_get_row_ub(self.problem, j)
            elif glpk_row_type == GLP_FR:
                row_lb = None
                row_ub = None
            else:
                raise Exception(
                    "Currently, optlang does not support glpk row type %s"
                    % str(glpk_row_type)
                )
            if isinstance(lhs, int):
                lhs = symbolics.Integer(lhs)
            elif isinstance(lhs, float):
                lhs = symbolics.Real(lhs)
            constraint_id = glp_get_row_name(self.problem, j)
            for variable in constraint_variables:
                try:
                    self._variables_to_constraints_mapping[variable.name].add(constraint_id)
                except KeyError:
                    self._variables_to_constraints_mapping[variable.name] = set([constraint_id])

            super(Model, self)._add_constraints(
                [Constraint(lhs, lb=row_lb, ub=row_ub, name=constraint_id, problem=self, sloppy=True)],
                sloppy=True
            )

        term_generator = (
            (glp_get_obj_coef(self.problem, index), variables[index - 1])
            for index in range(1, glp_get_num_cols(problem) + 1)
        )
        self._objective = Objective(
            symbolics.add(
                [symbolics.mul((symbolics.Real(term[0]), term[1])) for term in term_generator if
                 term[0] != 0.]
            ),
            problem=self,
            direction={GLP_MIN: 'min', GLP_MAX: 'max'}[glp_get_obj_dir(self.problem)])
        glp_scale_prob(self.problem, GLP_SF_AUTO)
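This initializer is what lets optlang wrap a pre-existing low-level GLPK problem. A hedged sketch of that round trip, assuming swiglpk and optlang are installed; the file name model.lp is hypothetical.

# Sketch: wrap an existing GLPK problem object in an optlang Model.
# Assumes swiglpk and optlang are installed; "model.lp" is a hypothetical file.
from swiglpk import glp_create_prob, glp_get_obj_coef, glp_read_lp
from optlang.glpk_interface import Model

prob = glp_create_prob()
glp_read_lp(prob, None, "model.lp")   # read a problem in CPLEX LP format

model = Model(problem=prob)           # runs the initialization shown above
for var in model.variables:
    # the coefficients optlang reports mirror the raw GLPK values
    print(var.name, glp_get_obj_coef(model.problem, var._index))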
Example 11
 def term_generator():
     for index in range(1, glp_get_num_cols(self.problem.problem) + 1):
         coeff = glp_get_obj_coef(self.problem.problem, index)
         if coeff != 0.:
             yield (symbolics.Real(coeff), variables[index - 1])
Example 12
 def test_set_linear_coefficients_objective(self):
     self.model.objective.set_linear_coefficients(
         {self.model.variables.R_TPI: 666.})
     self.assertEqual(
         glp_get_obj_coef(self.model.problem,
                          self.model.variables.R_TPI._index), 666.)
Example 13
 def test_set_linear_coefficients_objective(self):
     self.model.objective.set_linear_coefficients({self.model.variables.R_TPI: 666.})
     self.assertEqual(glp_get_obj_coef(self.model.problem, self.model.variables.R_TPI._index), 666.)
Example 14
    def __init__(self, problem=None, *args, **kwargs):

        super(Model, self).__init__(*args, **kwargs)

        self.configuration = Configuration()

        if problem is None:
            self.problem = glp_create_prob()
            glp_create_index(self.problem)
            if self.name is not None:
                glp_set_prob_name(self.problem, str(self.name))

        else:
            try:
                self.problem = problem
                glp_create_index(self.problem)
            except TypeError:
                raise TypeError("Provided problem is not a valid GLPK model.")
            row_num = glp_get_num_rows(self.problem)
            col_num = glp_get_num_cols(self.problem)
            for i in range(1, col_num + 1):
                var = Variable(
                    glp_get_col_name(self.problem, i),
                    lb=glp_get_col_lb(self.problem, i),
                    ub=glp_get_col_ub(self.problem, i),
                    problem=self,
                    type=_GLPK_VTYPE_TO_VTYPE[
                        glp_get_col_kind(self.problem, i)]
                )
                # This avoids adding the variable to the glpk problem
                super(Model, self)._add_variables([var])
            variables = self.variables

            for j in range(1, row_num + 1):
                ia = intArray(col_num + 1)
                da = doubleArray(col_num + 1)
                nnz = glp_get_mat_row(self.problem, j, ia, da)
                constraint_variables = [variables[ia[i] - 1] for i in range(1, nnz + 1)]

                # Since constraint expressions are lazily retrieved from the solver they don't have to be built here
                # lhs = _unevaluated_Add(*[da[i] * constraint_variables[i - 1]
                #                         for i in range(1, nnz + 1)])
                lhs = 0

                glpk_row_type = glp_get_row_type(self.problem, j)
                if glpk_row_type == GLP_FX:
                    row_lb = glp_get_row_lb(self.problem, j)
                    row_ub = row_lb
                elif glpk_row_type == GLP_LO:
                    row_lb = glp_get_row_lb(self.problem, j)
                    row_ub = None
                elif glpk_row_type == GLP_UP:
                    row_lb = None
                    row_ub = glp_get_row_ub(self.problem, j)
                elif glpk_row_type == GLP_DB:
                    row_lb = glp_get_row_lb(self.problem, j)
                    row_ub = glp_get_row_ub(self.problem, j)
                elif glpk_row_type == GLP_FR:
                    row_lb = None
                    row_ub = None
                else:
                    raise Exception(
                        "Currently, optlang does not support glpk row type %s"
                        % str(glpk_row_type)
                    )
                if isinstance(lhs, int):
                    lhs = sympy.Integer(lhs)
                elif isinstance(lhs, float):
                    lhs = sympy.RealNumber(lhs)
                constraint_id = glp_get_row_name(self.problem, j)
                for variable in constraint_variables:
                    try:
                        self._variables_to_constraints_mapping[variable.name].add(constraint_id)
                    except KeyError:
                        self._variables_to_constraints_mapping[variable.name] = set([constraint_id])

                super(Model, self)._add_constraints(
                    [Constraint(lhs, lb=row_lb, ub=row_ub, name=constraint_id, problem=self, sloppy=True)],
                    sloppy=True
                )

            term_generator = (
                (glp_get_obj_coef(self.problem, index), variables[index - 1])
                for index in range(1, glp_get_num_cols(problem) + 1)
            )
            self._objective = Objective(
                _unevaluated_Add(
                    *[_unevaluated_Mul(sympy.RealNumber(term[0]), term[1]) for term in term_generator if
                      term[0] != 0.]),
                problem=self,
                direction={GLP_MIN: 'min', GLP_MAX: 'max'}[glp_get_obj_dir(self.problem)])
        glp_scale_prob(self.problem, GLP_SF_AUTO)
Example 15
 def term_generator():
     for index in range(1, glp_get_num_cols(self.problem.problem) + 1):
         coeff = glp_get_obj_coef(self.problem.problem, index)
         if coeff != 0.:
             yield (sympy.RealNumber(coeff), variables[index - 1])
Example 16
 def get_linear_coefficients(self, variables):
     if self.problem is not None:
         self.problem.update()
         return {var: glp_get_obj_coef(self.problem.problem, var._index) for var in variables}
     else:
         raise Exception("Can't get coefficients from solver if objective is not in a model")
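Taken together with the set_linear_coefficients examples earlier, the read and write paths meet at glp_get_obj_coef. A minimal hedged round-trip sketch, assuming optlang's GLPK backend; the names are illustrative.

# Sketch of the set/get round trip exercised by the tests above.
# Assumes optlang's GLPK backend; names are illustrative.
from swiglpk import glp_get_obj_coef
from optlang.glpk_interface import Model, Objective, Variable

v = Variable('v', lb=0, ub=10)
model = Model(name='roundtrip_demo')
model.add([v])
model.objective = Objective(1.0 * v, direction='min')

model.objective.set_linear_coefficients({v: 666.})
assert model.objective.get_linear_coefficients([v])[v] == 666.
assert glp_get_obj_coef(model.problem, v._index) == 666.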