def test_parse_non_expanded_quadratic_expression(self):
        x, y, z = self.vars[:3]

        offset = 5
        expr = (x + y)**2 - (z - 2)**2 + offset
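        # The expression above expands to x**2 + 2*x*y + y**2 - z**2 + 4*z - 4 + offset,
        # which is exactly what the quadratic target, the linear target and the
        # constant checked below encode.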
        target = {
            frozenset([x]): 1,
            frozenset([y]): 1,
            frozenset([x, y]): 2,
            frozenset([z]): -1
        }
        linear_target = {z: 4}

        constraint = Constraint(expr, lb=0)
        offset_const, linear_terms_const, quad_terms_const = parse_optimization_expression(
            constraint, quadratic=True)
        offset_obj, linear_terms_obj, quad_terms_obj = parse_optimization_expression(
            Objective(expr), expression=expr, linear=False)

        self.assertEqual(offset_const - constraint.lb, -4 + offset)
        self.assertEqual(offset_obj, -4 + offset)
        _compare_term_dicts(self, linear_terms_const, linear_target)
        _compare_term_dicts(self, linear_terms_obj, linear_target)
        _compare_term_dicts(self, quad_terms_const, target)
        _compare_term_dicts(self, quad_terms_obj, target)
    def test_parse_linear_expression(self):
        x, y, z = self.vars[:3]
        expr = 1 * x + 2 * y - 3 * z
        target = {x: 1, y: 2, z: -3}

        linear_terms_const, quad_terms_const = parse_optimization_expression(Constraint(expr, lb=0))
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(Objective(expr), linear=False)

        self.assertEqual(linear_terms_const, target)
        self.assertEqual(linear_terms_obj, target)
        self.assertEqual(quad_terms_const, {})
        self.assertEqual(quad_terms_obj, {})
    def test_parse_quadratic_expression(self):
        x, y, z = self.vars[:3]

        expr = 2 * x**2 + 3 * x * y - 4 * z**2
        target = {frozenset([x]): 2, frozenset([z]): -4, frozenset([x, y]): 3}

        linear_terms_const, quad_terms_const = parse_optimization_expression(Constraint(expr, lb=0), linear=False)
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(Objective(expr), quadratic=True)

        self.assertEqual(linear_terms_const, {})
        self.assertEqual(linear_terms_obj, {})
        self.assertEqual(quad_terms_const, target)
        self.assertEqual(quad_terms_obj, target)
    def test_parse_non_expanded_quadratic_expression(self):
        x, y, z = self.vars[:3]

        expr = (x + y)**2 - (z - 2)**2
        target = {frozenset([x]): 1, frozenset([y]): 1, frozenset([x, y]): 2, frozenset([z]): -1}
        linear_target = {z: 4}

        linear_terms_const, quad_terms_const = parse_optimization_expression(Constraint(expr, lb=0), quadratic=True)
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(Objective(expr), linear=False)

        self.assertEqual(linear_terms_const, linear_target)
        self.assertEqual(linear_terms_obj, linear_target)
        self.assertEqual(quad_terms_const, target)
        self.assertEqual(quad_terms_obj, target)
Example #5
    def test_parse_linear_expression(self):
        x, y, z = self.vars[:3]
        expr = 1 * x + 2 * y - 3 * z
        target = {x: 1, y: 2, z: -3}

        linear_terms_const, quad_terms_const = parse_optimization_expression(
            Constraint(expr, lb=0))
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(
            Objective(expr), linear=False)

        self.assertEqual(linear_terms_const, target)
        self.assertEqual(linear_terms_obj, target)
        self.assertEqual(quad_terms_const, {})
        self.assertEqual(quad_terms_obj, {})
Example #6
    def test_parse_linear_expression(self):
        x, y, z = self.vars[:3]
        offset = 3
        expr = 1 * x + 2 * y - 3 * z + offset
        target = {x: 1, y: 2, z: -3}

        offset_const, linear_terms_const, quad_terms_const = parse_optimization_expression(Constraint(expr, lb=0))
        offset_obj, linear_terms_obj, quad_terms_obj = parse_optimization_expression(Objective(expr), linear=False)

        self.assertEqual(offset_const, 0)
        self.assertEqual(offset_obj, offset)
        _compare_term_dicts(self, linear_terms_const, target)
        _compare_term_dicts(self, linear_terms_obj, target)
        self.assertEqual(quad_terms_const, {})
        self.assertEqual(quad_terms_obj, {})
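        # Based on the assertions above: for a Constraint the returned offset is 0
        # (the constant term is presumably moved into the constraint's bounds),
        # while for an Objective the constant is returned directly as the offset.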
Example #7
    def test_parse_quadratic_expression(self):
        x, y, z = self.vars[:3]

        expr = 2 * x**2 + 3 * x * y - 4 * z**2
        target = {frozenset([x]): 2, frozenset([z]): -4, frozenset([x, y]): 3}

        linear_terms_const, quad_terms_const = parse_optimization_expression(
            Constraint(expr, lb=0), linear=False)
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(
            Objective(expr), quadratic=True)

        self.assertEqual(linear_terms_const, {})
        self.assertEqual(linear_terms_obj, {})
        self.assertEqual(quad_terms_const, target)
        self.assertEqual(quad_terms_obj, target)
    def objective(self, value):
        super(Model, self.__class__).objective.fset(self, value)
        expression = self._objective._expression

        linear_coefficients, quadratic_coefficients = parse_optimization_expression(
            value, quadratic=True, expression=expression
        )
        grb_terms = []
        for var, coef in linear_coefficients.items():
            var = self.problem.getVarByName(var.name)
            grb_terms.append(coef * var)
        for key, coef in quadratic_coefficients.items():
            if len(key) == 1:
                var = six.next(iter(key))
                var = self.problem.getVarByName(var.name)
                grb_terms.append(coef * var * var)
            else:
                var1, var2 = key
                var1 = self.problem.getVarByName(var1.name)
                var2 = self.problem.getVarByName(var2.name)
                grb_terms.append(coef * var1 * var2)

        grb_expression = gurobipy.quicksum(grb_terms)

        self.problem.setObjective(grb_expression,
                                  {'min': gurobipy.GRB.MINIMIZE, 'max': gurobipy.GRB.MAXIMIZE}[value.direction])
        value.problem = self
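    # The quadratic coefficients returned by parse_optimization_expression are keyed by
    # frozensets of variables: a single-variable key such as frozenset([x]) stands for a
    # squared term (coef * x**2), while a two-variable key such as frozenset([x, y]) stands
    # for a cross term (coef * x * y), which is how the two branches above rebuild the
    # Gurobi expression.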
Example #9
    def objective(self, value):
        value.problem = None
        if self._objective is not None:  # Reset previous objective
            variables = self.objective.variables
            if len(variables) > 0:
                name_list = [var.name for var in variables]
                index_dict = {n: i for n, i in zip(name_list, self._get_variable_indices(name_list))}
                self.problem.objective.set_linear([(index_dict[variable.name], 0.) for variable in variables])
            if self.problem.objective.get_num_quadratic_variables() > 0:
                self.problem.objective.set_quadratic([0. for _ in range(self.problem.variables.get_num())])
        super(Model, self.__class__).objective.fset(self, value)
        self.update()
        expression = self._objective._expression
        offset, linear_coefficients, quadratic_coefficients = parse_optimization_expression(value, quadratic=True, expression=expression)
        # self.problem.objective.set_offset(float(offset)) # Not available prior to 12.6.2
        self._objective_offset = offset
        if linear_coefficients:
            name_list = [var.name for var in linear_coefficients]
            index_dict = {n: i for n, i in zip(name_list, self._get_variable_indices(name_list))}
            self.problem.objective.set_linear([index_dict[var.name], float(coef)] for var, coef in linear_coefficients.items())

        for key, coef in quadratic_coefficients.items():
            if len(key) == 1:
                var = six.next(iter(key))
                self.problem.objective.set_quadratic_coefficients(var.name, var.name, float(coef) * 2)
            else:
                var1, var2 = key
                self.problem.objective.set_quadratic_coefficients(var1.name, var2.name, float(coef))


        self._set_objective_direction(value.direction)
        self.problem.objective.set_name(value.name)
        value.problem = self
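    # Hedged note (not from the source): the factor of 2 on the diagonal terms above is
    # presumably there because CPLEX treats the quadratic part of the objective as
    # 1/2 * x'Qx, so representing coef * x**2 needs a diagonal entry of 2 * coef, whereas
    # an off-diagonal call to set_quadratic_coefficients already fills both symmetric
    # entries of Q.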
Example #10
    def objective(self, value):
        value.problem = None
        if self._objective is not None:  # Reset previous objective
            variables = self.objective.variables
            if len(variables) > 0:
                self.problem.objective.set_linear([(variable.name, 0.)
                                                   for variable in variables])
            if self.problem.objective.get_num_quadratic_variables() > 0:
                self.problem.objective.set_quadratic(
                    [0. for _ in range(self.problem.variables.get_num())])
        super(Model, self.__class__).objective.fset(self, value)
        self.update()
        expression = self._objective._expression
        linear_coefficients, quadratic_coefficients = parse_optimization_expression(
            value, quadratic=True, expression=expression)
        if linear_coefficients:
            self.problem.objective.set_linear(
                [var.name, float(coef)]
                for var, coef in linear_coefficients.items())

        for key, coef in quadratic_coefficients.items():
            if len(key) == 1:
                var = six.next(iter(key))
                self.problem.objective.set_quadratic_coefficients(
                    var.name, var.name,
                    float(coef) * 2)
            else:
                var1, var2 = key
                self.problem.objective.set_quadratic_coefficients(
                    var1.name, var2.name, float(coef))

        self._set_objective_direction(value.direction)
        self.problem.objective.set_name(value.name)
        value.problem = self
Example #11
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)
        for constraint in constraints:
            if constraint.lb is None and constraint.ub is None:
                raise ValueError("optlang does not support free constraints in the gurobi interface.")
            self.problem.update()
            constraint._problem = None
            if constraint.is_Linear:
                offset, coef_dict, _ = parse_optimization_expression(constraint, linear=True)

                lhs = gurobipy.quicksum([coef * var._internal_variable for var, coef in coef_dict.items()])
                sense, rhs, range_value = _constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value(constraint.lb,
                                                                                                    constraint.ub)

                if range_value != 0:
                    aux_var = self.problem.addVar(name=constraint.name + '_aux', lb=0, ub=range_value)
                    self.problem.update()
                    lhs = lhs - aux_var

                self.problem.addConstr(lhs, sense, rhs, name=constraint.name)
            else:
                raise ValueError(
                    "GUROBI currently only supports linear constraints. %s is not linear." % self)
                # self.problem.addQConstr(lhs, sense, rhs)
            constraint.problem = self
        self.problem.update()
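    # Hedged note (not from the source): the range_value branch above appears to encode a
    # two-sided (ranged) constraint by subtracting a slack variable aux with
    # 0 <= aux <= range_value from the left-hand side, so that a single addConstr call
    # can represent lb <= expr <= ub.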
Example #12
    def objective(self, value):
        super(Model, self.__class__).objective.fset(self, value)
        expression = self._objective._expression

        offset, linear_coefficients, quadratic_coefficients = parse_optimization_expression(
            value, quadratic=True, expression=expression)
        # self.problem.setAttr("ObjCon", offset) # Does not seem to work
        self._objective_offset = offset
        grb_terms = []
        for var, coef in linear_coefficients.items():
            var = self.problem.getVarByName(var.name)
            grb_terms.append(coef * var)
        for key, coef in quadratic_coefficients.items():
            if len(key) == 1:
                var = six.next(iter(key))
                var = self.problem.getVarByName(var.name)
                grb_terms.append(coef * var * var)
            else:
                var1, var2 = key
                var1 = self.problem.getVarByName(var1.name)
                var2 = self.problem.getVarByName(var2.name)
                grb_terms.append(coef * var1 * var2)

        grb_expression = gurobipy.quicksum(grb_terms)

        self.problem.setObjective(grb_expression, {
            'min': gurobipy.GRB.MINIMIZE,
            'max': gurobipy.GRB.MAXIMIZE
        }[value.direction])
        value.problem = self
Example #13
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)
        self.problem.reset()
        for constraint in constraints:
            constraint._problem = None  # This needs to be done in order to not trigger constraint._get_expression()
            if constraint.is_Linear:
                _, coeff_dict, _ = parse_optimization_expression(constraint)
                lb = -Infinity if constraint.lb is None else float(constraint.lb)
                ub = Infinity if constraint.ub is None else float(constraint.ub)
                self.problem.constraints.add(constraint.name)
                self.problem.constraint_coefs.update({
                    (constraint.name, v.name): float(co)
                    for v, co in six.iteritems(coeff_dict)
                })
                self.problem.constraint_lbs[constraint.name] = lb
                self.problem.constraint_ubs[constraint.name] = ub
                constraint.problem = self
            elif constraint.is_Quadratic:
                raise NotImplementedError(
                    "Quadratic constraints (like %s) are not supported "
                    "in OSQP yet." % constraint)
            else:
                raise ValueError(
                    "OSQP only supports linear or quadratic constraints. "
                    "%s is neither linear nor quadratic." % constraint)
Example #15
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)
        for constraint in constraints:
            constraint._problem = None  # This needs to be done in order to not trigger constraint._get_expression()
            glp_add_rows(self.problem, 1)
            index = glp_get_num_rows(self.problem)
            glp_set_row_name(self.problem, index,
                             str(constraint.name).encode())
            num_cols = glp_get_num_cols(self.problem)
            index_array = ffi.new("int[{}]".format(num_cols +
                                                   1))  #intArray(num_cols + 1)
            value_array = ffi.new(
                "double[{}]".format(num_cols + 1))  #doubleArray(num_cols + 1)
            num_vars = 0  # constraint.variables is too expensive for large problems

            offset, coef_dict, _ = parse_optimization_expression(constraint,
                                                                 linear=True)

            num_vars = len(coef_dict)
            for i, (var, coef) in enumerate(coef_dict.items()):
                index_array[i + 1] = var._index
                value_array[i + 1] = float(coef)

            glp_set_mat_row(self.problem, index, num_vars, index_array,
                            value_array)
            constraint._problem = self
            self._glpk_set_row_bounds(constraint)
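    # GLPK's glp_set_mat_row expects 1-based index/value arrays (element 0 is ignored),
    # which is why the arrays above are allocated with num_cols + 1 slots and filled
    # starting at position i + 1.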
    def objective(self, value):
        value.problem = None
        if self._objective is not None:  # Reset previous objective
            variables = self.objective.variables
            if len(variables) > 0:
                self.problem.objective.set_linear([(variable.name, 0.) for variable in variables])
            if self.problem.objective.get_num_quadratic_variables() > 0:
                self.problem.objective.set_quadratic([0. for _ in range(self.problem.variables.get_num())])
        super(Model, self.__class__).objective.fset(self, value)
        self.update()
        expression = self._objective._expression
        linear_coefficients, quadratic_coefficients = parse_optimization_expression(value, quadratic=True, expression=expression)
        if linear_coefficients:
            self.problem.objective.set_linear([var.name, float(coef)] for var, coef in linear_coefficients.items())

        for key, coef in quadratic_coefficients.items():
            if len(key) == 1:
                var = six.next(iter(key))
                self.problem.objective.set_quadratic_coefficients(var.name, var.name, float(coef) * 2)
            else:
                var1, var2 = key
                self.problem.objective.set_quadratic_coefficients(var1.name, var2.name, float(coef))


        self._set_objective_direction(value.direction)
        self.problem.objective.set_name(value.name)
        value.problem = self
Example #18
    def _expr_to_mip_expr(self, expr):
        """Parses a mip linear expression from an optlang expression."""
        if hasattr(expr, "expression") and symbolics.USE_SYMENGINE:
            expr._expression = expr.expression.expand()
        offset, coeffs, _ = parse_optimization_expression(expr)
        return offset + mip.xsum(
            to_float(coef) * self.problem.var_by_name('v_' + var.name)
            for var, coef in coeffs.items())
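    # Note: the lookup above assumes that each optlang variable was registered in the
    # underlying mip model under the name 'v_' + var.name when it was added.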
Example #19
    def test_parse_quadratic_expression(self):
        x, y, z = self.vars[:3]

        offset = 4
        expr = 2 * x**2 + 3 * x * y - 4 * z**2 + offset
        target = {frozenset([x]): 2, frozenset([z]): -4, frozenset([x, y]): 3}

        offset_const, linear_terms_const, quad_terms_const = parse_optimization_expression(Constraint(expr, lb=0), linear=False)
        offset_obj, linear_terms_obj, quad_terms_obj = parse_optimization_expression(Objective(expr), quadratic=True)

        self.assertEqual(offset_const, 0)
        self.assertEqual(offset_obj, offset)
        self.assertEqual(linear_terms_const, {})
        self.assertEqual(linear_terms_obj, {})
        _compare_term_dicts(self, quad_terms_const, target)
        _compare_term_dicts(self, quad_terms_obj, target)
        self.assertEqual((_quad_terms_to_expression(quad_terms_obj) - (expr - offset)).expand(), 0)
        self.assertEqual((_quad_terms_to_expression(quad_terms_const) - (expr - offset)).expand(), 0)
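# Hedged sketch (not part of the source): one plausible shape for the
# _quad_terms_to_expression helper used above. It rebuilds a symbolic expression from a
# {frozenset(variables): coefficient} mapping so the round trip against the original
# (offset-free) expression can be checked. The name with the _sketch suffix is
# hypothetical.
def _quad_terms_to_expression_sketch(quad_terms):
    expression = 0
    for key, coef in quad_terms.items():
        variables = list(key)
        if len(variables) == 1:
            expression += coef * variables[0] ** 2  # single-variable key: squared term
        else:
            expression += coef * variables[0] * variables[1]  # two-variable key: cross term
    return expression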
Example #20
    def objective(self, value):
        super(Model, Model).objective.fset(self, value)
        value.problem = None
        if value is None:
            self.problem.objective = {}
        else:
            offset, coefficients, _ = parse_optimization_expression(value)
            self.problem.objective = {var.name: coef for var, coef in coefficients.items()}
            self.problem.offset = offset
            self.problem.direction = value.direction
        value.problem = self
Example #21
    def test_parse_non_expanded_quadratic_expression(self):
        x, y, z = self.vars[:3]

        expr = (x + y)**2 - (z - 2)**2
        target = {
            frozenset([x]): 1,
            frozenset([y]): 1,
            frozenset([x, y]): 2,
            frozenset([z]): -1
        }
        linear_target = {z: 4}

        linear_terms_const, quad_terms_const = parse_optimization_expression(
            Constraint(expr, lb=0), quadratic=True)
        linear_terms_obj, quad_terms_obj = parse_optimization_expression(
            Objective(expr), linear=False)

        self.assertEqual(linear_terms_const, linear_target)
        self.assertEqual(linear_terms_obj, linear_target)
        self.assertEqual(quad_terms_const, target)
        self.assertEqual(quad_terms_obj, target)
Example #22
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)

        linear_constraints = dict(lin_expr=[],
                                  senses=[],
                                  rhs=[],
                                  range_values=[],
                                  names=[])
        for constraint in constraints:
            constraint._problem = None  # This needs to be done in order to not trigger constraint._get_expression()
            if constraint.is_Linear:
                offset, coeff_dict, _ = parse_optimization_expression(
                    constraint)

                sense, rhs, range_value = _constraint_lb_and_ub_to_cplex_sense_rhs_and_range_value(
                    constraint.lb, constraint.ub)
                indices = [var.name for var in coeff_dict]
                values = [float(val) for val in coeff_dict.values()]
                if constraint.indicator_variable is None:
                    linear_constraints['lin_expr'].append(
                        cplex.SparsePair(ind=indices, val=values))
                    linear_constraints['senses'].append(sense)
                    linear_constraints['rhs'].append(rhs)
                    linear_constraints['range_values'].append(range_value)
                    linear_constraints['names'].append(constraint.name)
                else:
                    if sense == 'R':
                        raise ValueError(
                            'CPLEX does not support indicator constraints that have both an upper and lower bound.'
                        )
                    else:
                        # Indicator constraints cannot be added in batch
                        self.problem.indicator_constraints.add(
                            lin_expr=cplex.SparsePair(ind=indices, val=values),
                            sense=sense,
                            rhs=rhs,
                            name=constraint.name,
                            indvar=constraint.indicator_variable.name,
                            complemented=abs(constraint.active_when) - 1)

            elif constraint.is_Quadratic:
                raise NotImplementedError(
                    'Quadratic constraints (like %s) are not supported yet.' %
                    constraint)
            else:
                raise ValueError(
                    "CPLEX only supports linear or quadratic constraints. %s is neither linear nor quadratic."
                    % constraint)
            constraint.problem = self
        self.problem.linear_constraints.add(**linear_constraints)
Example #23
    def objective(self, value):
        super(Model, Model).objective.fset(self, value)
        value.problem = None
        if value is None:
            self.problem.objective = {}
        else:
            offset, coefficients, _ = parse_optimization_expression(value)
            self.problem.objective = {
                var.name: coef
                for var, coef in coefficients.items()
            }
            self.problem.offset = offset
            self.problem.direction = value.direction
        value.problem = self
    def objective(self, value):
        value.problem = None
        if self._objective is not None:
            variables = self.objective.variables
            for variable in variables:
                if variable._index is not None:
                    glp_set_obj_coef(self.problem, variable._index, 0.)
        super(Model, self.__class__).objective.fset(self, value)
        self.update()

        coef_dict, _ = parse_optimization_expression(value, linear=True)

        for var, coef in coef_dict.items():
            glp_set_obj_coef(self.problem, var._index, float(coef))

        glp_set_obj_dir(
            self.problem,
            {'min': GLP_MIN, 'max': GLP_MAX}[self._objective.direction]
        )
        value.problem = self
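    # GLPK only supports linear objectives, so the expression is parsed with linear=True
    # and no quadratic terms are handled in this setter.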
    def objective(self, value):
        value.problem = None
        if self._objective is not None:
            variables = self.objective.variables
            for variable in variables:
                if variable._index is not None:
                    glp_set_obj_coef(self.problem, variable._index, 0.)
        super(Model, self.__class__).objective.fset(self, value)
        self.update()

        coef_dict, _ = parse_optimization_expression(value, linear=True)

        for var, coef in coef_dict.items():
            glp_set_obj_coef(self.problem, var._index, float(coef))

        glp_set_obj_dir(self.problem, {
            'min': GLP_MIN,
            'max': GLP_MAX
        }[self._objective.direction])
        value.problem = self
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)

        linear_constraints = dict(lin_expr=[], senses=[], rhs=[], range_values=[], names=[])
        for constraint in constraints:
            constraint._problem = None  # This needs to be done in order to not trigger constraint._get_expression()
            if constraint.is_Linear:
                coeff_dict, _ = parse_optimization_expression(constraint)

                sense, rhs, range_value = _constraint_lb_and_ub_to_cplex_sense_rhs_and_range_value(
                    constraint.lb,
                    constraint.ub
                )
                indices = [var.name for var in coeff_dict]
                values = [float(val) for val in coeff_dict.values()]
                if constraint.indicator_variable is None:
                    linear_constraints['lin_expr'].append(cplex.SparsePair(ind=indices, val=values))
                    linear_constraints['senses'].append(sense)
                    linear_constraints['rhs'].append(rhs)
                    linear_constraints['range_values'].append(range_value)
                    linear_constraints['names'].append(constraint.name)
                else:
                    if sense == 'R':
                        raise ValueError(
                            'CPLEX does not support indicator constraints that have both an upper and lower bound.')
                    else:
                        # Indicator constraints cannot be added in batch
                        self.problem.indicator_constraints.add(
                            lin_expr=cplex.SparsePair(ind=indices, val=values), sense=sense, rhs=rhs,
                            name=constraint.name,
                            indvar=constraint.indicator_variable.name, complemented=abs(constraint.active_when) - 1)

            elif constraint.is_Quadratic:
                raise NotImplementedError('Quadratic constraints (like %s) are not supported yet.' % constraint)
            else:
                raise ValueError(
                    "CPLEX only supports linear or quadratic constraints. %s is neither linear nor quadratic." % constraint)
            constraint.problem = self
        self.problem.linear_constraints.add(**linear_constraints)
    def _add_constraints(self, constraints, sloppy=False):
        super(Model, self)._add_constraints(constraints, sloppy=sloppy)
        for constraint in constraints:
            constraint._problem = None  # This needs to be done in order to not trigger constraint._get_expression()
            glp_add_rows(self.problem, 1)
            index = glp_get_num_rows(self.problem)
            glp_set_row_name(self.problem, index, str(constraint.name))
            num_cols = glp_get_num_cols(self.problem)
            index_array = intArray(num_cols + 1)
            value_array = doubleArray(num_cols + 1)
            num_vars = 0  # constraint.variables is too expensive for large problems

            coef_dict, _ = parse_optimization_expression(constraint, linear=True)

            num_vars = len(coef_dict)
            for i, (var, coef) in enumerate(coef_dict.items()):
                index_array[i + 1] = var._index
                value_array[i + 1] = float(coef)

            glp_set_mat_row(self.problem, index, num_vars,
                            index_array, value_array)
            constraint._problem = self
            self._glpk_set_row_bounds(constraint)
Example #28
    def objective(self, value):
        value.problem = None
        if self._objective is not None:  # Reset previous objective
            self.problem.obj_linear_coefs = dict()
            self.problem.obj_quadratic_coefs = dict()
        super(Model, self.__class__).objective.fset(self, value)
        self.update()
        expression = self._objective._expression
        (
            offset,
            linear_coefficients,
            quadratic_coefficients,
        ) = parse_optimization_expression(value,
                                          quadratic=True,
                                          expression=expression)
        self._objective_offset = offset
        if linear_coefficients:
            self.problem.obj_linear_coefs = {
                v.name: float(c)
                for v, c in six.iteritems(linear_coefficients)
            }

        for key, coef in six.iteritems(quadratic_coefficients):
            if len(key) == 1:
                var = six.next(iter(key))
                self.problem.obj_quadratic_coefs[(var.name,
                                                  var.name)] = float(coef)
            else:
                var1, var2 = key
                self.problem.obj_quadratic_coefs[(
                    var1.name, var2.name)] = 0.5 * float(coef)
                self.problem.obj_quadratic_coefs[(
                    var2.name, var1.name)] = 0.5 * float(coef)

        self._set_objective_direction(value.direction)
        value.problem = self
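    # Hedged note (not from the source): splitting an off-diagonal coefficient into two
    # 0.5 * coef entries at (var1, var2) and (var2, var1) keeps the stored quadratic
    # coefficient matrix symmetric while preserving the total weight of the
    # coef * var1 * var2 term.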