Example #1
File: test_con.py  Project: Pyomo/pyomo
    def test_set_expr_inline(self):
        """Test expr= option (inline expression)"""
        model = ConcreteModel()
        model.A = RangeSet(1,4)
        model.x = Var(model.A,initialize=2)
        model.c = Constraint(expr=(0, sum(model.x[i] for i in model.A), 1))

        self.assertEqual(model.c(), 8)
        self.assertEqual(value(model.c.body), 8)
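The 3-tuple passed to expr= declares a ranged constraint: (lower bound, body, upper bound). A minimal sketch of the same constraint written as an explicit inequality expression, assuming a Pyomo version that exports inequality from pyomo.environ:

    from pyomo.environ import ConcreteModel, RangeSet, Var, Constraint, inequality, value

    model = ConcreteModel()
    model.A = RangeSet(1, 4)
    model.x = Var(model.A, initialize=2)
    # Equivalent to Constraint(expr=(0, body, 1)), i.e. 0 <= sum(x) <= 1
    model.c = Constraint(expr=inequality(0, sum(model.x[i] for i in model.A), 1))
    print(value(model.c.body))  # prints 8 for the initialization above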
Example #2
File: test_con.py  Project: Pyomo/pyomo
    def test_set_expr_explicit_multivariate(self):
        """Test expr= option (multivariate expression)"""
        model = ConcreteModel()
        model.A = RangeSet(1,4)
        model.x = Var(model.A, initialize=2)
        ans=0
        for i in model.A:
            ans = ans + model.x[i]
        ans = ans >= 0
        ans = ans <= 1
        model.c = Constraint(expr=ans)

        self.assertEqual(model.c(), 8)
        self.assertEqual(model.c.body(), 8)
        self.assertEqual(value(model.c.body), 8)
Example #3
"""
Created on Mon Apr  8 23:39:24 2019

@author: changlinli
"""

from pyomo.environ import ConcreteModel, AbstractModel, Param, RangeSet, Set, BuildAction, Var, Objective, Piecewise, minimize, value
from pyomo.environ import NonNegativeReals, Integers, Binary, PositiveIntegers
from pyomo.opt import SolverStatus, TerminationCondition
from pyomo.opt import SolverFactory

v = {}
v[1, 1] = 9
v[2, 2] = 16
v[3, 3] = 25
model = ConcreteModel()
model.A = RangeSet(1, 3)
model.B = RangeSet(1, 3)
model.P = Param(model.A, model.B)
model.S = Param(model.A, model.B, initialize=v, default=0)


def s_validate(model, v, i):
    return v > 3.14159


model.S = Param(model.A, validate=s_validate)


def s_init(model, i, j):
    if i == j:
        return i * i
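    # (Snippet truncated here; a hedged sketch of the likely continuation follows.
    #  Every index gets a value and the rule is then used as an initializer.
    #  The component name S1 is illustrative, not from the original script.)
    else:
        return 0.0


model.S1 = Param(model.A, model.B, initialize=s_init)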
Example #4
File: test_con.py  Project: Pyomo/pyomo
 def create_model(self):
     model = ConcreteModel()
     model.A = Set(initialize=[1,2,3,4])
     return model
Example #5
 def test_pickle_concrete_model_mutable_param(self):
     model = ConcreteModel()
     model.A = Param(initialize=1, mutable=True)
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #6
 def test_pickle_concrete_model_var(self):
     model = ConcreteModel()
     model.A = Var(initialize=1)
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #7
File: OptiTope.py  Project: lkuchenb/fred
    def __init__(self, results,  threshold=None, k=10, solver="glpk", verbosity=0):
        """
        :param results: Epitope prediction result object from which the epitope selection should be performed
        :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
        :param dict(str,float) threshold: A dictionary storing the binding thresholds for each HLA
                                          :class:`~Fred2.Core.Allele.Allele` key = allele name; value = the threshold
        :param int k: The number of epitopes to select
        :param str solver: The solver to be used (default glpk)
        :param int verbosity: Integer defining whether additional debug prints are made >0 => debug mode
        """

        #check input data
        if not isinstance(results, EpitopePredictionResult):
            raise ValueError("first input parameter is not of type EpitopePredictionResult")

        _alleles = copy.deepcopy(results.columns.values.tolist())

        #test if allele prob is set, if not set allele prob uniform
        #if only partly set infer missing values (assuming uniformity of missing value)
        prob = []
        no_prob = []
        for a in _alleles:
            if a.prob is None:
                no_prob.append(a)
            else:
                prob.append(a)

        if len(no_prob) > 0:
            #group by locus
            no_prob_grouped = {}
            prob_grouped = {}
            for a in no_prob:
                no_prob_grouped.setdefault(a.locus, []).append(a)
            for a in prob:
                prob_grouped.setdefault(a.locus, []).append(a)

            for g, v in no_prob_grouped.iteritems():
                total_loc_a = len(v)
                if g in prob_grouped:
                    remaining_mass = 1.0 - sum(a.prob for a in prob_grouped[g])
                    for a in v:
                        a.prob = remaining_mass/total_loc_a
                else:
                    for a in v:
                        a.prob = 1.0/total_loc_a
        probs = {a.name:a.prob for a in _alleles}
        if verbosity:
            for a in _alleles:
                print a.name, a.prob

        #start constructing model
        self.__solver = SolverFactory(solver)
        self.__verbosity = verbosity
        self.__changed = True
        self.__alleleProb = _alleles
        self.__k = k
        self.__result = None
        self.__thresh = {} if threshold is None else threshold

        # Variable, Set and Parameter preparation
        alleles_I = {}
        variations = []
        epi_var = {}
        imm = {}
        peps = {}
        cons = {}

        #unstack multiindex df to get normal df based on first prediction method
        #and filter for binding epitopes
        method = results.index.values[0][1]
        res_df = results.xs(results.index.values[0][1], level="Method")
        res_df = res_df[res_df.apply(lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                                                   for a in res_df.columns), axis=1)]

        for tup in res_df.itertuples():
            p = tup[0]
            seq = str(p)
            peps[seq] = p
            for a, s in itr.izip(res_df.columns, tup[1:]):
                if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                    try:
                        thr = min(1., max(0.0, 1.0 - math.log(self.__thresh.get(a.name),
                                                      50000))) if a.name in self.__thresh else -float("inf")
                    except:
                        thr = 0

                    if s >= thr:
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = min(1., max(0.0, 1.0 - math.log(s, 50000)))
                else:
                    if s > self.__thresh.get(a.name, -float("inf")):
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = s

            prots = set(pr for pr in p.get_all_proteins())
            cons[seq] = len(prots)
            for prot in prots:
                variations.append(prot.gene_id)
                epi_var.setdefault(prot.gene_id, set()).add(seq)
        self.__peptideSet = peps

        #calculate conservation
        variations = set(variations)
        total = len(variations)
        for e, v in cons.iteritems():
            try:
                cons[e] = v / total
            except ZeroDivisionError:
                cons[e] = 1
        model = ConcreteModel()

        #set definition
        model.Q = Set(initialize=variations)

        model.E = Set(initialize=set(peps.keys()))

        model.A = Set(initialize=alleles_I.keys())
        model.E_var = Set(model.Q, initialize=lambda model, v: epi_var[v])
        model.A_I = Set(model.A, initialize=lambda model, a: alleles_I[a])


        #parameter definition
        model.k = Param(initialize=self.__k, within=PositiveIntegers, mutable=True)
        model.p = Param(model.A, initialize=lambda model, a: probs[a])

        model.c = Param(model.E, initialize=lambda model, e: cons[e],mutable=True)

        #threshold parameters
        model.i = Param(model.E, model.A, initialize=lambda model, e, a: imm[e, a])
        model.t_allele = Param(initialize=0, within=NonNegativeIntegers, mutable=True)
        model.t_var = Param(initialize=0, within=NonNegativeIntegers, mutable=True)
        model.t_c = Param(initialize=0.0, within=NonNegativeReals, mutable=True)

        # Variable Definition
        model.x = Var(model.E, within=Binary)
        model.y = Var(model.A, within=Binary)
        model.z = Var(model.Q, within=Binary)

        # Objective definition
        model.Obj = Objective(
            rule=lambda model: sum(model.x[e] * sum(model.p[a] * model.i[e, a] for a in model.A) for e in model.E),
            sense=maximize)


        #Obligatory Constraint (number of selected epitopes)
        model.NofSelectedEpitopesCov = Constraint(rule=lambda model: sum(model.x[e] for e in model.E) <= model.k)

        #optional constraints (in basic model they are disabled)
        model.IsAlleleCovConst = Constraint(model.A,
                                            rule=lambda model, a: sum(model.x[e] for e in model.A_I[a]) >= model.y[a])
        model.MinAlleleCovConst = Constraint(rule=lambda model: sum(model.y[a] for a in model.A) >= model.t_allele)

        model.IsAntigenCovConst = Constraint(model.Q,
                                             rule=lambda model, q: sum(model.x[e] for e in model.E_var[q]) >= model.z[q])
        model.MinAntigenCovConst = Constraint(rule=lambda model: sum(model.z[q] for q in model.Q) >= model.t_var)
        model.EpitopeConsConst = Constraint(model.E,
                                            rule=lambda model, e: (1 - model.c[e]) * model.x[e] <= 1 - model.t_c)

        #generate instance
        self.instance = model
        if self.__verbosity > 0:
            print "MODEL INSTANCE"
            self.instance.pprint()

        #constraints
        self.instance.IsAlleleCovConst.deactivate()
        self.instance.MinAlleleCovConst.deactivate()
        self.instance.IsAntigenCovConst.deactivate()
        self.instance.MinAntigenCovConst.deactivate()
        self.instance.EpitopeConsConst.deactivate()
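The optional coverage constraints are left deactivated above. A hedged sketch (illustrative only, not part of OptiTope's documented API) of how a later method of the same class could switch allele coverage back on, using the mutable Params and deactivated Constraints built in __init__:

        # Require coverage of at least 2 HLA alleles, re-activate the
        # corresponding constraints, and re-solve the instance.
        self.instance.t_allele.set_value(2)       # scalar mutable Param, settable after construction
        self.instance.IsAlleleCovConst.activate()
        self.instance.MinAlleleCovConst.activate()
        res = self.__solver.solve(self.instance)  # solver chosen in __init__ (default glpk)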
Example #8
 def create_model(self):
     model = ConcreteModel()
     model.A = Set(initialize=[1, 2, 3, 4])
     return model
Example #9
    def __init__(self,
                 results,
                 threshold=None,
                 dist_threshold=1.0,
                 distance={},
                 expression={},
                 uncertainty={},
                 overlap=0,
                 k=10,
                 k_taa=0,
                 solver="glpk",
                 verbosity=0,
                 include=[]):
        """
        :param results: Epitope prediction result object from which the epitope selection should be performed
        :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
        :param dict(str,float) threshold: A dictionary storing the binding thresholds for each HLA
                                          :class:`~Fred2.Core.Allele.Allele` key = allele name; value = the threshold
        :param float dist_threshold: Distance threshold: an epitope gets excluded if its dist-2-self score is
                                     smaller than or equal to this threshold for any HLA allele
        :param dict((str,str),float) distance: A dictionary with key: (peptide sequence, HLA name)
                                               and value the distance2self
        :param dict(str, float) expression: A dictionary with key: gene ID, and value: Gene expression
                                            in FPKM/RPKM or TPM
        :param dict((str,str),float) uncertainty: A dictionary with key (peptide seq, HLA name), and value the
                                                  associated uncertainty of the immunogenicity prediction
        :param int k: The number of epitopes to select
        :param int k_taa: The number of TAA epitopes to select
        :param str solver: The solver to be used (default glpk)
        :param int verbosity: Integer defining whether additional debug prints are made >0 => debug mode
        """

        # check input data
        if not isinstance(results, EpitopePredictionResult):
            raise ValueError(
                "first input parameter is not of type EpitopePredictionResult")

        _alleles = results.columns.values.tolist()

        # generate abundance dictionary of HLA alleles default is 2.0 as values will be log2 transformed
        probs = {
            a.name: 2.0 if a.get_metadata("abundance", only_first=True) is None
            else a.get_metadata("abundance", only_first=True)
            for a in _alleles
        }

        # start constructing model
        self.__solver = SolverFactory(solver)
        self.__verbosity = verbosity
        self.__changed = True
        self.__alleleProb = _alleles
        self.__k = k
        self.__k_taa = k_taa
        self.__result = None
        self.__thresh = {} if threshold is None else threshold
        self.__included = include
        self.overlap = overlap

        # variable, set and parameter preparation
        alleles_I = {}
        variations = []
        epi_var = {}
        imm = {}
        peps = {}
        taa = []
        var_epi = {}
        cons = {}

        for a in _alleles:
            alleles_I.setdefault(a.name, set())

        # unstack multiindex df to get normal df based on first prediction method
        # and filter for binding epitopes
        method = results.index.values[0][1]
        res_df = results.xs(results.index.values[0][1], level="Method")

        # if predictions are not available for peptides/alleles, replace by 0
        res_df.fillna(0, inplace=True)

        res_df = res_df[res_df.apply(
            lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                          for a in res_df.columns),
            axis=1)]

        res_df.fillna(0, inplace=True)
        # transform scores to 1-log50k(IC50) scores if necessary
        # and generate mapping dictionaries for Set definitions
        for tup in res_df.itertuples():
            p = tup[0]
            seq = str(p)

            if any(
                    distance.get((seq, a.name), 1.0) <= dist_threshold
                    for a in _alleles):
                continue
            peps[seq] = p
            if p.get_metadata("taa", only_first=True):
                taa.append(seq)
            for a, s in itr.izip(res_df.columns, tup[1:]):
                if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                    try:
                        thr = min(
                            1.,
                            max(
                                0.0, 1.0 -
                                math.log(self.__thresh.get(a.name), 50000))
                        ) if a.name in self.__thresh else -float("inf")
                    except:
                        thr = 0

                    if s >= thr:
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = min(1.,
                                           max(0.0, 1.0 - math.log(s, 50000)))
                else:
                    if s > self.__thresh.get(a.name, -float("inf")):
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = s

            prots = set(pr for pr in p.get_all_proteins())
            cons[seq] = len(prots)
            for prot in prots:
                variations.append(prot.gene_id)
                epi_var.setdefault(prot.gene_id, set()).add(seq)
                var_epi.setdefault(str(seq), set()).add(prot.gene_id)
        self.__peptideSet = peps

        # calculate conservation
        variations = set(variations)
        total = len(variations)
        for e, v in cons.iteritems():
            try:
                cons[e] = v / total
            except ZeroDivisionError:
                cons[e] = 1
        model = ConcreteModel()

        ######################################
        #
        # MODEL DEFINITIONS
        #
        ######################################

        # set definition
        model.Q = Set(initialize=variations)
        model.E = Set(initialize=set(peps.keys()))
        model.TAA = Set(initialize=set(taa))
        model.A = Set(initialize=alleles_I.keys())
        model.G = Set(model.E, initialize=lambda model, e: var_epi[e])
        model.E_var = Set(model.Q, initialize=lambda model, v: epi_var[v])
        model.A_I = Set(model.A, initialize=lambda model, a: alleles_I[a])

        if self.__included is not None:
            if len(self.__included) > k:
                raise ValueError(
                    "More epitopes to include than epitopes to select! "
                    "Either raise k or reduce epitopes to include.")
        model.Include = Set(within=model.E, initialize=self.__included)

        if overlap > 0:

            def longest_common_substring(model):
                result = []
                for s1, s2 in itr.combinations(model.E, 2):
                    if s1 != s2:
                        if s1 in s2 or s2 in s1:
                            result.append((s1, s2))
                        m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
                        longest, x_longest = 0, 0
                        for x in xrange(1, 1 + len(s1)):
                            for y in xrange(1, 1 + len(s2)):
                                if s1[x - 1] == s2[y - 1]:
                                    m[x][y] = m[x - 1][y - 1] + 1
                                    if m[x][y] > longest:
                                        longest = m[x][y]
                                        x_longest = x
                                else:
                                    m[x][y] = 0
                        if len(s1[x_longest - longest:x_longest]) >= overlap:
                            result.append((s1, s2))
                return set(result)

            model.O = Set(dimen=2, initialize=longest_common_substring)

        # parameter definition
        model.k = Param(initialize=self.__k,
                        within=PositiveIntegers,
                        mutable=True)
        model.k_taa = Param(initialize=self.__k_taa,
                            within=NonNegativeIntegers,
                            mutable=True)
        model.p = Param(
            model.A,
            initialize=lambda model, a: max(0, math.log(probs[a] + 0.001, 2)))
        model.c = Param(model.E,
                        initialize=lambda model, e: cons[e],
                        mutable=True)
        model.sigma = Param(model.E,
                            model.A,
                            initialize=lambda model, e, a: uncertainty.get(
                                (e, a), 0))
        model.i = Param(model.E,
                        model.A,
                        initialize=lambda model, e, a: imm[e, a])
        model.t_allele = Param(initialize=0,
                               within=NonNegativeIntegers,
                               mutable=True)
        model.t_var = Param(initialize=0,
                            within=NonNegativeIntegers,
                            mutable=True)
        model.t_c = Param(initialize=0.0,
                          within=NonNegativeReals,
                          mutable=True)
        model.abd = Param(model.Q,
                          initialize=lambda model, g: max(
                              0, math.log(expression.get(g, 2) + 0.001, 2)))
        model.eps1 = Param(initialize=1e6, mutable=True)
        model.eps2 = Param(initialize=1e6, mutable=True)

        # variable Definition
        model.x = Var(model.E, within=Binary)
        model.y = Var(model.A, within=Binary)
        model.z = Var(model.Q, within=Binary)

        # objective definition
        model.Obj1 = Objective(rule=lambda model: -sum(model.x[e] * sum(
            model.abd[g] for g in model.G[e]) * sum(model.p[a] * model.i[e, a]
                                                    for a in model.A)
                                                       for e in model.E),
                               sense=minimize)
        model.Obj2 = Objective(
            rule=lambda model: sum(model.x[e] * sum(model.sigma[e, a]
                                                    for a in model.A)
                                   for e in model.E),
            sense=minimize)

        # constraints
        # obligatory Constraint (number of selected epitopes)
        model.NofSelectedEpitopesCov1 = Constraint(
            rule=lambda model: sum(model.x[e] for e in model.E) >= model.k)
        model.NofSelectedEpitopesCov2 = Constraint(
            rule=lambda model: sum(model.x[e] for e in model.E) <= model.k)
        model.NofSelectedTAACov = Constraint(rule=lambda model: sum(
            model.x[e] for e in model.TAA) <= model.k_taa)

        # optional constraints (in basic model they are disabled)
        model.IsAlleleCovConst = Constraint(
            model.A,
            rule=lambda model, a: sum(model.x[e]
                                      for e in model.A_I[a]) >= model.y[a])

        model.MinAlleleCovConst = Constraint(rule=lambda model: sum(
            model.y[a] for a in model.A) >= model.t_allele)

        model.IsAntigenCovConst = Constraint(
            model.Q,
            rule=lambda model, q: sum(model.x[e]
                                      for e in model.E_var[q]) >= model.z[q])
        model.MinAntigenCovConst = Constraint(
            rule=lambda model: sum(model.z[q] for q in model.Q) >= model.t_var)

        model.EpitopeConsConst = Constraint(
            model.E,
            rule=lambda model, e:
            (1 - model.c[e]) * model.x[e] <= 1 - model.t_c)

        if overlap > 0:
            model.OverlappingConstraint = Constraint(
                model.O,
                rule=lambda model, e1, e2: model.x[e1] + model.x[e2] <= 1)

        # constraints for Pareto optimization
        model.ImmConst = Constraint(rule=lambda model: sum(model.x[e] * sum(
            model.abd[g] for g in model.G[e]) * sum(model.p[a] * model.i[
                e, a] for a in model.A) for e in model.E) <= model.eps1)
        model.UncertaintyConst = Constraint(
            rule=lambda model: sum(model.x[e] * sum(model.sigma[
                e, a] for a in model.A) for e in model.E) <= model.eps2)
        self.__objectives = [model.Obj1, model.Obj2]
        self.__constraints = [model.UncertaintyConst, model.ImmConst]
        self.__epsilons = [model.eps2, model.eps1]

        # include constraint
        model.IncludeEpitopeConstraint = Constraint(
            model.Include, rule=lambda model, e: model.x[e] >= 1)

        # generate instance
        self.instance = model
        if self.__verbosity > 0:
            print "MODEL INSTANCE"
            self.instance.pprint()

        # constraints
        self.instance.Obj2.deactivate()
        self.instance.ImmConst.deactivate()
        self.instance.UncertaintyConst.deactivate()
        self.instance.IsAlleleCovConst.deactivate()
        self.instance.MinAlleleCovConst.deactivate()
        self.instance.IsAntigenCovConst.deactivate()
        self.instance.MinAntigenCovConst.deactivate()
        self.instance.EpitopeConsConst.deactivate()
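The paired objectives (Obj1, Obj2), the mutable eps1/eps2 Params, and the deactivated ImmConst/UncertaintyConst constraints above set the model up for epsilon-constraint style Pareto optimization. A hedged sketch of one such step, written as it might appear in a later method of the same class (illustrative only; it assumes value from pyomo.environ is in scope and that the solver loads solutions by default):

        # Step 1: optimize immunogenicity (Obj1 is the negated immunogenicity, minimized).
        self.__solver.solve(self.instance)
        best_imm = -value(self.instance.Obj1)          # achieved immunogenicity sum
        # Step 2: bound immunogenicity via eps1, then minimize the uncertainty objective.
        self.instance.eps1.set_value(best_imm)         # ImmConst: sum(...) <= eps1
        self.instance.Obj1.deactivate()
        self.instance.Obj2.activate()
        self.instance.ImmConst.activate()
        self.__solver.solve(self.instance)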
Example #10
File: test_model.py  Project: vova292/pyomo
    def test_solve1(self):
        model = ConcreteModel()
        model.A = RangeSet(1, 4)
        model.x = Var(model.A, bounds=(-1, 1))

        def obj_rule(model):
            return sum_product(model.x)

        model.obj = Objective(rule=obj_rule)

        def c_rule(model):
            expr = 0
            for i in model.A:
                expr += i * model.x[i]
            return expr == 0

        model.c = Constraint(rule=c_rule)
        opt = SolverFactory('glpk')
        results = opt.solve(model, symbolic_solver_labels=True)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1.out"), format='json')
        self.assertMatchesJsonBaseline(join(currdir, "solve1.out"),
                                       join(currdir, "solve1.txt"),
                                       tolerance=1e-4)

        #
        def d_rule(model):
            return model.x[1] >= 0

        model.d = Constraint(rule=d_rule)
        model.d.deactivate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1x.out"), format='json')
        self.assertMatchesJsonBaseline(join(currdir, "solve1x.out"),
                                       join(currdir, "solve1.txt"),
                                       tolerance=1e-4)
        #
        model.d.activate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1a.out"), format='json')
        self.assertMatchesJsonBaseline(join(currdir, "solve1a.out"),
                                       join(currdir, "solve1a.txt"),
                                       tolerance=1e-4)
        #
        model.d.deactivate()

        def e_rule(model, i):
            return model.x[i] >= 0

        model.e = Constraint(model.A, rule=e_rule)
        for i in model.A:
            model.e[i].deactivate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1y.out"), format='json')
        self.assertMatchesJsonBaseline(join(currdir, "solve1y.out"),
                                       join(currdir, "solve1.txt"),
                                       tolerance=1e-4)
        #
        model.e.activate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1b.out"), format='json')
        self.assertMatchesJsonBaseline(join(currdir, "solve1b.out"),
                                       join(currdir, "solve1b.txt"),
                                       tolerance=1e-4)
Example #11
def build_gdp_model():

    # PARAMETERS
    T1_lo, T1_up = 350., 400.
    T2_lo, T2_up = 450., 500.

    U = {'1': 1.5, '2': 0.5, '3': 1}
    FCP = {'hot': 10.0, 'cold': 7.5}
    T_in = {'hot': 500., 'cold': 350., 'cooling': 300., 'steam': 600.}
    T_out = {'hot': 340., 'cold': 560., 'cooling': 320., 'steam': 600.}
    Cost = {'cooling': 20., 'steam': 80.}

    # VARIABLES
    m = ConcreteModel()
    m.T1 = Var(domain=NonNegativeReals, bounds=(T1_lo, T1_up))
    m.T2 = Var(domain=NonNegativeReals, bounds=(T2_lo, T2_up))

    m.exchangers = RangeSet(1, 3)
    m.A = Var(m.exchangers, domain=NonNegativeReals, bounds=(1e-4, 50))
    m.CP = Var(m.exchangers,
               domain=NonNegativeReals,
               bounds=(0, 600 * (50**0.6) + 2 * 46500))
    # Note that A_lo=0 leads to an exception in MC++ if using gdpopt with strategy 'GLOA'
    # The exception occurs when constructing McCormick relaxations

    # OBJECTIVE
    m.objective = Objective(expr=(sum(m.CP[i]
                                      for i in m.exchangers) + FCP['hot'] *
                                  (m.T1 - T_out['hot']) * Cost['cooling'] +
                                  FCP['cold'] *
                                  (T_out['cold'] - m.T2) * Cost['steam']))

    # GLOBAL CONSTRAINTS
    m.constr1 = Constraint(expr=FCP['hot'] * (T_in['hot'] - m.T1) == m.A[1] *
                           U['1'] * ((T_in['hot'] - m.T2) +
                                     (m.T1 - T_in['cold'])) / 2.)
    m.constr2 = Constraint(  # Note the error in the paper in constraint 2
        expr=FCP['hot'] * (m.T1 - T_out['hot']) == m.A[2] * U['2'] *
        ((T_out['hot'] - T_in['cooling']) + (m.T1 - T_out['cooling'])) / 2.)
    m.constr3 = Constraint(
        expr=FCP['cold'] * (T_out['cold'] - m.T2) == m.A[3] * U['3'] *
        ((T_out['steam'] - m.T2) + (T_in['steam'] - T_out['cold'])) / 2.)
    m.constr4 = Constraint(expr=FCP['hot'] *
                           (T_in['hot'] - m.T1) == FCP['cold'] *
                           (m.T2 - T_in['cold']))

    # DISJUNCTIONS
    @m.Disjunction(m.exchangers)
    def exchanger_disjunction(m, disjctn):
        return [[
            m.CP[disjctn] == 2750 * (m.A[disjctn]**0.6) + 3000,
            0. <= m.A[disjctn], m.A[disjctn] <= 10.
        ],
                [
                    m.CP[disjctn] == 1500 * (m.A[disjctn]**0.6) + 15000,
                    10. <= m.A[disjctn], m.A[disjctn] <= 25.
                ],
                [
                    m.CP[disjctn] == 600 * (m.A[disjctn]**0.6) + 46500,
                    25. <= m.A[disjctn], m.A[disjctn] <= 50.
                ]]

    return m
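A hedged sketch of solving the disjunctive model returned above. The comment in the model notes that A_lo=0 breaks GDPopt's 'GLOA' strategy, so the logic-based outer approximation ('LOA') is used here; solver availability (glpk, ipopt) is an assumption:

    from pyomo.environ import SolverFactory, value

    m = build_gdp_model()
    # GDPopt handles the disjunctions directly (alternatively, apply
    # TransformationFactory('gdp.bigm') first and hand the MINLP to a solver).
    SolverFactory('gdpopt').solve(m, strategy='LOA',
                                  mip_solver='glpk', nlp_solver='ipopt')
    print(value(m.objective), [value(m.A[i]) for i in m.exchangers])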
Example #12
 def test_pickle_concrete_model_constant_objective(self):
     model = ConcreteModel()
     model.A = Objective(expr=1)
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #13
 def test_pickle_concrete_model_indexed_var(self):
     model = ConcreteModel()
     model.A = Var([1, 2, 3], initialize={1: 100, 2: 200, 3: 300})
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #14
    def test_solve1(self):
        model = ConcreteModel()
        model.A = RangeSet(1, 4)
        model.x = Var(model.A, bounds=(-1, 1))

        def obj_rule(model):
            return sum_product(model.x)

        model.obj = Objective(rule=obj_rule)

        def c_rule(model):
            expr = 0
            for i in model.A:
                expr += i * model.x[i]
            return expr == 0

        model.c = Constraint(rule=c_rule)
        opt = SolverFactory('glpk')
        results = opt.solve(model, symbolic_solver_labels=True)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1.out"), format='json')
        with open(join(currdir,"solve1.out"), 'r') as out, \
            open(join(currdir,"solve1.txt"), 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-4,
                                             allow_second_superset=True)
        #
        def d_rule(model):
            return model.x[1] >= 0

        model.d = Constraint(rule=d_rule)
        model.d.deactivate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1x.out"), format='json')
        with open(join(currdir,"solve1x.out"), 'r') as out, \
            open(join(currdir,"solve1.txt"), 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-4,
                                             allow_second_superset=True)
        #
        model.d.activate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1a.out"), format='json')
        with open(join(currdir,"solve1a.out"), 'r') as out, \
            open(join(currdir,"solve1a.txt"), 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-4,
                                             allow_second_superset=True)
        #
        model.d.deactivate()

        def e_rule(model, i):
            return model.x[i] >= 0

        model.e = Constraint(model.A, rule=e_rule)
        for i in model.A:
            model.e[i].deactivate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1y.out"), format='json')
        with open(join(currdir,"solve1y.out"), 'r') as out, \
            open(join(currdir,"solve1.txt"), 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-4,
                                             allow_second_superset=True)
        #
        model.e.activate()
        results = opt.solve(model)
        model.solutions.store_to(results)
        results.write(filename=join(currdir, "solve1b.out"), format='json')
        with open(join(currdir,"solve1b.out"), 'r') as out, \
            open(join(currdir,"solve1b.txt"), 'r') as txt:
            self.assertStructuredAlmostEqual(json.load(txt),
                                             json.load(out),
                                             abstol=1e-4,
                                             allow_second_superset=True)
Example #15
def TSP(G, TIME_LIMIT=600):
    # Number of places
    n = G.number_of_nodes()

    # TODO: Implement the model of your choice
    m = ConcreteModel()

    # 1. Data and ranges
    m.N = RangeSet(n)

    m.A = Set(initialize=((i, j) for i, j in G.edges()), dimen=2)

    # 2. Variables
    # TODO: introduce only useful variables (no arc => no variable)
    m.x = Var(m.A, domain=NonNegativeReals, bounds=lambda m: (0, 1))

    # 3. Objective function
    # Objective function of arc variables
    m.obj = Objective(expr=sum(G[i][j]['weight'] * m.x[i, j]
                               for i, j in G.edges()))

    # 4. Constraints
    # Outgoing-arc constraints (each node has exactly one outgoing arc)
    m.outdegree = ConstraintList()
    for i in m.N:
        m.outdegree.add(expr=sum(m.x[v, w] for v, w in G.out_edges(i)) == 1)

    # Incoming-arc constraints (each node has exactly one incoming arc)
    m.indegree = ConstraintList()
    for j in m.N:
        m.indegree.add(expr=sum(m.x[v, w] for v, w in G.in_edges(j)) == 1)

    # Arc constraint
    m.arcs = ConstraintList()
    for i, j in G.edges():
        if i < j:
            m.arcs.add(m.x[i, j] + m.x[j, i] <= 1)

    m.subtour = ConstraintList()

    solver = SolverFactory('gurobi')

    # 5. Solution
    # Solve the model
    SOLVER_NAME = 'gurobi'
    # SOLVER_NAME = 'glpk'

    solver = SolverFactory(SOLVER_NAME)

    if SOLVER_NAME == 'glpk':
        solver.options['tmlim'] = TIME_LIMIT
    elif SOLVER_NAME == 'gurobi':
        solver.options['TimeLimit'] = TIME_LIMIT

    it = 0
    Cold = []
    while it <= 100:
        it += 1
        sol = solver.solve(m, tee=False, load_solutions=False)

        # Get a JSON representation of the solution
        sol_json = sol.json_repn()
        # Check solution status
        if sol_json['Solver'][0]['Status'] != 'ok':
            return None, []

        # Load the solution
        m.solutions.load_from(sol)

        print(it, m.obj())

        selected = []
        values = []
        for i in m.N:
            for j in m.N:
                if i < j:
                    if m.x[i, j]() > 0 or m.x[j, i]() > 0:
                        selected.append((i - 1, j - 1))
                        values.append(m.x[i, j]() + m.x[j, i]())

        PlotTour(Ls, selected, values)

        # Build graph
        H = nx.Graph()

        for i in m.N:
            for j in m.N:
                if i < j:
                    if m.x[i, j]() > 0.00001 or m.x[j, i]() > 0.00001:
                        H.add_edge(i, j, weight=m.x[i, j])

        Cs = nx.cycle_basis(H)

        if Cs != Cold:
            Cold = Cs
            for cycle in Cs:
                Es = []
                for i in cycle:
                    for j in G.nodes():
                        if j not in cycle:
                            Es.append((i, j))

                if len(Es) > 0:
                    m.subtour.add(sum(m.x[i, j] for i, j in Es) >= 1)
        else:
            break

    selected = []
    values = []
    for i in m.N:
        for j in m.N:
            if i < j:
                if m.x[i, j]() > 0 or m.x[j, i]() > 0:
                    selected.append((i - 1, j - 1))
                    values.append(m.x[i, j]() + m.x[j, i]())

    PlotTour(Ls, selected, values)

    return m.obj(), selected
Example #16
    def test_solve_with_pickle_then_clone(self):
        # This tests github issue Pyomo-#65
        model = ConcreteModel()
        model.A = RangeSet(1, 4)
        model.b = Block()
        model.b.x = Var(model.A, bounds=(-1, 1))
        model.b.obj = Objective(expr=sum_product(model.b.x))
        model.c = Constraint(expr=model.b.x[1] >= 0)
        opt = SolverFactory('glpk')
        self.assertEqual(len(model.solutions), 0)
        results = opt.solve(model, symbolic_solver_labels=True)
        self.assertEqual(len(model.solutions), 1)
        #
        self.assertEqual(model.solutions[0].gap, 0.0)
        #self.assertEqual(model.solutions[0].status, SolutionStatus.feasible)
        self.assertEqual(model.solutions[0].message, None)
        #
        buf = pickle.dumps(model)
        tmodel = pickle.loads(buf)
        self.assertEqual(len(tmodel.solutions), 1)
        self.assertEqual(tmodel.solutions[0].gap, 0.0)
        #self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible)
        self.assertEqual(tmodel.solutions[0].message, None)
        self.assertIn(id(tmodel.b.obj),
                      tmodel.solutions[0]._entry['objective'])
        self.assertIs(
            tmodel.b.obj,
            tmodel.solutions[0]._entry['objective'][id(tmodel.b.obj)][0]())

        inst = tmodel.clone()

        # make sure the clone has all the attributes
        self.assertTrue(hasattr(inst, 'A'))
        self.assertTrue(hasattr(inst, 'b'))
        self.assertTrue(hasattr(inst.b, 'x'))
        self.assertTrue(hasattr(inst.b, 'obj'))
        self.assertTrue(hasattr(inst, 'c'))
        # and that they were all copied
        self.assertIsNot(inst.A, tmodel.A)
        self.assertIsNot(inst.b, tmodel.b)
        self.assertIsNot(inst.b.x, tmodel.b.x)
        self.assertIsNot(inst.b.obj, tmodel.b.obj)
        self.assertIsNot(inst.c, tmodel.c)

        # Make sure the solution is on the new model
        self.assertTrue(hasattr(inst, 'solutions'))
        self.assertEqual(len(inst.solutions), 1)
        self.assertEqual(inst.solutions[0].gap, 0.0)
        #self.assertEqual(inst.solutions[0].status, SolutionStatus.feasible)
        self.assertEqual(inst.solutions[0].message, None)

        # Spot-check some components and make sure all the weakrefs in
        # the ModelSolution got updated
        self.assertIn(id(inst.b.obj), inst.solutions[0]._entry['objective'])
        _obj = inst.solutions[0]._entry['objective'][id(inst.b.obj)]
        self.assertIs(_obj[0](), inst.b.obj)

        for v in [1, 2, 3, 4]:
            self.assertIn(id(inst.b.x[v]),
                          inst.solutions[0]._entry['variable'])
            _v = inst.solutions[0]._entry['variable'][id(inst.b.x[v])]
            self.assertIs(_v[0](), inst.b.x[v])
Example #17
def TSPSYM(G, TIME_LIMIT=600):
    # Number of places
    n = G.number_of_nodes()

    # TODO: Implement the model of your choice
    m = ConcreteModel()

    # 1. Data and ranges
    m.N = RangeSet(n)

    m.A = Set(initialize=((i, j) for i, j in G.edges()), dimen=2)

    # 2. Variables
    # TODO: introduce only useful variables (no arc => no variable)
    m.x = Var(m.A, domain=NonNegativeReals, bounds=lambda m: (0, 1))

    # 3. Objective function
    # Objective function of arc variables
    m.obj = Objective(expr=sum(G[i][j]['weight'] * m.x[i, j] for i, j in m.A))

    # 4. Constraints
    # Degree constraints (each node is incident to exactly two tour edges)
    m.degree = ConstraintList()
    for i in m.N:
        Es = []
        for v, w in G.edges(i):
            if v > w:
                v, w = w, v
            Es.append((v, w))
        m.degree.add(expr=sum(m.x[v, w] for v, w in Es) == 2)

    m.subtour = ConstraintList()

    solver = SolverFactory('gurobi')

    # 5. Solution
    # Solve the model
    SOLVER_NAME = 'gurobi'
    # SOLVER_NAME = 'glpk'

    solver = SolverFactory(SOLVER_NAME)

    if SOLVER_NAME == 'glpk':
        solver.options['tmlim'] = TIME_LIMIT
    elif SOLVER_NAME == 'gurobi':
        solver.options['TimeLimit'] = TIME_LIMIT

    it = 0
    Cold = []
    while it <= 100:
        it += 1
        sol = solver.solve(m, tee=False, load_solutions=False)

        # Get a JSON representation of the solution
        sol_json = sol.json_repn()
        # Check solution status
        if sol_json['Solver'][0]['Status'] != 'ok':
            return None, []

        # Load the solution
        m.solutions.load_from(sol)

        selected = []
        values = []
        for i, j in m.A:
            if m.x[i, j]() > 0:
                selected.append((i - 1, j - 1))
                values.append(m.x[i, j]())

        PlotTour(Ls, selected, values)

        # Build graph
        H = nx.Graph()

        for i, j in m.A:
            H.add_edge(i, j, weight=m.x[i, j]())

        # Cs = nx.connected_components(H)
        # Cs = list(Cs)
        # Cs = nx.cycle_basis(H)

        cut_value, S = nx.stoer_wagner(H)
        print(it, m.obj(), sum(values))
        flag = True

        if cut_value >= 2:
            print(cut_value)
            # Separate blossom
            H = nx.Graph()
            for i, j in m.A:
                if m.x[i, j]() > 0.1 and m.x[i, j]() < 0.9:
                    if i < j:
                        H.add_edge(i, j)
                    else:
                        H.add_edge(j, i)

            selected = []
            values = []
            for i, j in m.A:
                selected.append((i - 1, j - 1))
                values.append(m.x[i, j]())
            Cs = nx.cycle_basis(H)
            for cycle in Cs:
                NS = len(cycle)
                if NS == 3:
                    S = set()
                    for i in range(NS):
                        if cycle[i - 1] < cycle[i]:
                            S.add((cycle[i - 1], cycle[i]))
                        else:
                            S.add((cycle[i], cycle[i - 1]))

                    for i in cycle:
                        for j in G.neighbors(i):
                            if (i, j) not in S:
                                v, w = i, j
                                if i > j:
                                    v, w = j, i
                                if m.x[v, w]() > 0.9:
                                    S.add((v, w))

                    if False and len(S) > NS + 2:
                        m.subtour.add(sum(m.x[i, j] for i, j in S) <= NS + 1)
                        flag = False
                        print('added', S)

        else:
            Es = []
            for i in S[0]:
                for j in S[1]:
                    if i < j:
                        Es.append((i, j))
                    else:
                        Es.append((j, i))

            if len(Es) > 0:
                m.subtour.add(sum(m.x[i, j] for i, j in Es) >= 2)
                flag = False
        if flag:
            break

        # sleep(1)

        # if Cs == Cold:
        #     break

        # Cold = Cs
        # for cycle in Cs:
        #     Es = []
        #     for i,j in m.A:
        #         if (i in cycle and j not in cycle) or (i not in cycle and j in cycle):
        #             if i < j:
        #                 Es.append( (i,j) )
        #             else:
        #                 Es.append( (j,i) )
        #             Es.append( (i,j) )

        #     if len(Es) > 0:
        #         m.subtour.add( sum(m.x[i,j] for i,j in Es ) >= 2 )

    selected = []
    values = []
    for i, j in m.A:
        if m.x[i, j]() > 0:
            selected.append((i - 1, j - 1))
            values.append(m.x[i, j]())
    print(values)
    PlotTour(Ls, selected, values)

    return m.obj(), selected
Example #18
 def test_pickle_concrete_model_set(self):
     model = ConcreteModel()
     model.A = Set(initialize=[1, 2, 3])
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #19
 def test_sum3(self):
     model = ConcreteModel()
     model.A = Set(initialize=[1,2,3], doc='set A')
     model.x = Var(model.A)
     expr = quicksum(model.x)
     self.assertEqual( expr, 6)
Example #20
 def test_pickle_concrete_model_mutable_indexed_param(self):
     model = ConcreteModel()
     model.A = Param([1, 2, 3], initialize={1: 100, 3: 300}, mutable=True)
     str = pickle.dumps(model)
     tmodel = pickle.loads(str)
     self.verifyModel(model, tmodel)
Example #21
    def __init__(self, results, threshold=None, dist_threshold=1.0, distance={}, expression={}, uncertainty={}, overlap=0, k=10, k_taa=0,
                 solver="glpk", verbosity=0, include=[]):
        """
        :param results: Epitope prediction result object from which the epitope selection should be performed
        :type results: :class:`~Fred2.Core.Result.EpitopePredictionResult`
        :param dict(str,float) threshold: A dictionary storing the binding thresholds for each HLA
                                          :class:`~Fred2.Core.Allele.Allele` key = allele name; value = the threshold
        :param float dist_threshold: Distance threshold: an epitope gets excluded if its dist-2-self score is
                                     smaller than or equal to this threshold for any HLA allele
        :param dict((str,str),float) distance: A dictionary with key: (peptide sequence, HLA name)
                                               and value the distance2self
        :param dict(str, float) expression: A dictionary with key: gene ID, and value: Gene expression
                                            in FPKM/RPKM or TPM
        :param dict((str,str),float) uncertainty: A dictionary with key (peptide seq, HLA name), and value the
                                                  associated uncertainty of the immunogenicity prediction
        :param int k: The number of epitopes to select
        :param int k_taa: The number of TAA epitopes to select
        :param str solver: The solver to be used (default glpk)
        :param int verbosity: Integer defining whether additional debug prints are made >0 => debug mode
        """

        # check input data
        if not isinstance(results, EpitopePredictionResult):
            raise ValueError("first input parameter is not of type EpitopePredictionResult")

        _alleles = results.columns.values.tolist()

        # generate abundance dictionary of HLA alleles default is 2.0 as values will be log2 transformed
        probs = {a.name:2.0 if a.get_metadata("abundance", only_first=True) is None else
                 a.get_metadata("abundance", only_first=True) for a in _alleles}

        # start constructing model
        self.__solver = SolverFactory(solver)
        self.__verbosity = verbosity
        self.__changed = True
        self.__alleleProb = _alleles
        self.__k = k
        self.__k_taa = k_taa
        self.__result = None
        self.__thresh = {} if threshold is None else threshold
        self.__included = include
        self.overlap=overlap

        # variable, set and parameter preparation
        alleles_I = {}
        variations = []
        epi_var = {}
        imm = {}
        peps = {}
        taa = []
        var_epi = {}
        cons = {}

        for a in _alleles:
            alleles_I.setdefault(a.name, set())

        # unstack multiindex df to get normal df based on first prediction method
        # and filter for binding epitopes
        method = results.index.values[0][1]
        res_df = results.xs(results.index.values[0][1], level="Method")

        # if predictions are not available for peptides/alleles, replace by 0
        res_df.fillna(0, inplace=True)

        res_df = res_df[res_df.apply(lambda x: any(x[a] > self.__thresh.get(a.name, -float("inf"))
                                                   for a in res_df.columns), axis=1)]

        res_df.fillna(0, inplace=True)
        # transform scores to 1-log50k(IC50) scores if necessary
        # and generate mapping dictionaries for Set definitions
        for tup in res_df.itertuples():
            p = tup[0]
            seq = str(p)

            if any(distance.get((seq, a.name), 1.0) <= dist_threshold for a in _alleles):
                continue
            peps[seq] = p
            if p.get_metadata("taa",only_first=True):
                taa.append(seq)
            for a, s in itr.izip(res_df.columns, tup[1:]):
                if method in ["smm", "smmpmbec", "arb", "comblibsidney"]:
                    try:
                        thr = min(1., max(0.0, 1.0 - math.log(self.__thresh.get(a.name),
                                                      50000))) if a.name in self.__thresh else -float("inf")
                    except:
                        thr = 0

                    if s >= thr:
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = min(1., max(0.0, 1.0 - math.log(s, 50000)))
                else:
                    if s > self.__thresh.get(a.name, -float("inf")):
                        alleles_I.setdefault(a.name, set()).add(seq)
                    imm[seq, a.name] = s

            prots = set(pr for pr in p.get_all_proteins())
            cons[seq] = len(prots)
            for prot in prots:
                variations.append(prot.gene_id)
                epi_var.setdefault(prot.gene_id, set()).add(seq)
                var_epi.setdefault(str(seq), set()).add(prot.gene_id)
        self.__peptideSet = peps

        # calculate conservation
        variations = set(variations)
        total = len(variations)
        for e, v in cons.iteritems():
            try:
                cons[e] = v / total
            except ZeroDivisionError:
                cons[e] = 1
        model = ConcreteModel()

        ######################################
        #
        # MODEL DEFINITIONS
        #
        ######################################

        # set definition
        model.Q = Set(initialize=variations)
        model.E = Set(initialize=set(peps.keys()))
        model.TAA = Set(initialize=set(taa))
        model.A = Set(initialize=alleles_I.keys())
        model.G = Set(model.E, initialize=lambda model, e: var_epi[e])
        model.E_var = Set(model.Q, initialize=lambda model, v: epi_var[v])
        model.A_I = Set(model.A, initialize=lambda model, a: alleles_I[a])

        if self.__included is not None:
            if len(self.__included) > k:
                raise ValueError("More epitopes to include than epitopes to select! "
                                 "Either raise k or reduce epitopes to include.")
        model.Include = Set(within=model.E, initialize=self.__included)

        if overlap > 0:
            def longest_common_substring(model):
                result = []
                for s1,s2 in itr.combinations(model.E,2):
                    if s1 != s2:
                        if s1 in s2 or s2 in s1:
                            result.append((s1,s2))
                        m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
                        longest, x_longest = 0, 0
                        for x in xrange(1, 1 + len(s1)):
                            for y in xrange(1, 1 + len(s2)):
                                if s1[x - 1] == s2[y - 1]:
                                    m[x][y] = m[x - 1][y - 1] + 1
                                    if m[x][y] > longest:
                                        longest = m[x][y]
                                        x_longest = x
                                else:
                                    m[x][y] = 0
                        if len(s1[x_longest - longest: x_longest]) >= overlap:
                            result.append((s1,s2))
                return set(result)
            model.O = Set(dimen=2, initialize=longest_common_substring)

        # parameter definition
        model.k = Param(initialize=self.__k, within=PositiveIntegers, mutable=True)
        model.k_taa = Param(initialize=self.__k_taa, within=NonNegativeIntegers, mutable=True)
        model.p = Param(model.A, initialize=lambda model, a: max(0, math.log(probs[a]+0.001,2)))
        model.c = Param(model.E, initialize=lambda model, e: cons[e],mutable=True)
        model.sigma = Param(model.E, model.A, initialize=lambda model, e, a: uncertainty.get((e, a), 0))
        model.i = Param(model.E, model.A, initialize=lambda model, e, a: imm[e, a])
        model.t_allele = Param(initialize=0, within=NonNegativeIntegers, mutable=True)
        model.t_var = Param(initialize=0, within=NonNegativeIntegers, mutable=True)
        model.t_c = Param(initialize=0.0, within=NonNegativeReals, mutable=True)
        model.abd = Param(model.Q, initialize=lambda model, g: max(0, math.log(expression.get(g, 2)+0.001, 2)))
        model.eps1 = Param(initialize=1e6, mutable=True)
        model.eps2 = Param(initialize=1e6, mutable=True)

        # variable Definition
        model.x = Var(model.E, within=Binary)
        model.y = Var(model.A, within=Binary)
        model.z = Var(model.Q, within=Binary)

        # objective definition
        model.Obj1 = Objective(
            rule=lambda model: -sum(model.x[e] * sum(model.abd[g] for g in model.G[e])
                             * sum(model.p[a] * model.i[e, a] for a in model.A) for e in model.E),
            sense=minimize)
        model.Obj2 = Objective(
            rule=lambda model: sum(model.x[e]*sum(model.sigma[e,a] for a in model.A) for e in model.E),
            sense=minimize)

        # constraints
        # obligatory Constraint (number of selected epitopes)
        model.NofSelectedEpitopesCov1 = Constraint(rule=lambda model: sum(model.x[e] for e in model.E) >= model.k)
        model.NofSelectedEpitopesCov2 = Constraint(rule=lambda model: sum(model.x[e] for e in model.E) <= model.k)
        model.NofSelectedTAACov = Constraint(rule=lambda model: sum(model.x[e] for e in model.TAA) <= model.k_taa)

        # optional constraints (in basic model they are disabled)
        model.IsAlleleCovConst = Constraint(model.A,
                                            rule=lambda model, a: sum(model.x[e] for e in model.A_I[a]) >= model.y[a])

        model.MinAlleleCovConst = Constraint(rule=lambda model: sum(model.y[a] for a in model.A) >= model.t_allele)

        model.IsAntigenCovConst = Constraint(model.Q,
                                             rule=lambda model, q: sum(model.x[e] for e in model.E_var[q]) >= model.z[q])
        model.MinAntigenCovConst = Constraint(rule=lambda model: sum(model.z[q] for q in model.Q) >= model.t_var)

        model.EpitopeConsConst = Constraint(model.E,
                                            rule=lambda model, e: (1 - model.c[e]) * model.x[e] <= 1 - model.t_c)

        if overlap > 0:
            model.OverlappingConstraint = Constraint(model.O, rule=lambda model, e1, e2: model.x[e1]+model.x[e2] <= 1)

        # constraints for Pareto optimization
        model.ImmConst = Constraint(rule=lambda model: sum(model.x[e] * sum(model.abd[g] for g in model.G[e])
                                                       * sum(model.p[a] * model.i[e, a]
                                                       for a in model.A) for e in model.E) <= model.eps1)
        model.UncertaintyConst = Constraint(rule=lambda model:sum(model.x[e]*sum(model.sigma[e,a]
                                                                                 for a in model.A)
                                                                  for e in model.E) <= model.eps2)
        self.__objectives = [model.Obj1, model.Obj2]
        self.__constraints = [model.UncertaintyConst, model.ImmConst]
        self.__epsilons = [model.eps2, model.eps1]

        # include constraint
        model.IncludeEpitopeConstraint = Constraint(model.Include, rule=lambda model, e: model.x[e] >= 1)

        # generate instance
        self.instance = model
        if self.__verbosity > 0:
            print "MODEL INSTANCE"
            self.instance.pprint()

        # constraints
        self.instance.Obj2.deactivate()
        self.instance.ImmConst.deactivate()
        self.instance.UncertaintyConst.deactivate()
        self.instance.IsAlleleCovConst.deactivate()
        self.instance.MinAlleleCovConst.deactivate()
        self.instance.IsAntigenCovConst.deactivate()
        self.instance.MinAntigenCovConst.deactivate()
        self.instance.EpitopeConsConst.deactivate()