Example #1
from pulp import LpProblem, LpMinimize, LpVariable, LpStatus


def main(l1, l2, segments=False, printResult=True):
    """Find the intersection of two lines y = a*x + b, each given as
    [a, b, x_start, x_end]; restrict x to the segments' overlap when segments=True."""

    prob = LpProblem("Line Segments intersection", LpMinimize)

    xLeftBorder, xRightBorder = None, None
    if segments:
        xLeftBorder = max([l1[2], l2[2]])
        xRightBorder = min([l1[3], l2[3]])

    x = LpVariable("x",
                   xLeftBorder,
                   xRightBorder,
                   )

    y = LpVariable("y",
                   None,
                   None,
                   )

    prob += 0, "arbitrary objective"

    # x-coordinate of the intersection: the two lines have equal y there
    prob += x*l1[0] + l1[1] == x*l2[0] + l2[1]

    # y-coordinate of the intersection (divides by the slopes, so it
    # assumes neither l1[0] nor l2[0] is zero)
    prob += (y - l1[1])/l1[0] == (y - l2[1])/l2[0]

    prob.solve()

    if printResult:
        print("Status:", LpStatus[prob.status])

    if LpStatus[prob.status] == "Optimal":
        return x.value(), y.value()

    return None
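A minimal usage sketch for the snippet above (the [slope, intercept, x_start, x_end] line format is inferred from how main() indexes its arguments; the numbers are illustrative):

# Two illustrative segments: y = x and y = -x + 4, both defined on x in [0, 10].
l1 = [1.0, 0.0, 0.0, 10.0]
l2 = [-1.0, 4.0, 0.0, 10.0]
print(main(l1, l2, segments=True))  # expected (2.0, 2.0) when the solve is optimal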
Example #2

from pulp import LpProblem, LpMinimize, LpVariable, LpStatus, value


def main(p1, p2, printResult=True):
    """Recover the slope (coef) and intercept (z) of the line through points p1 and p2."""

    prob = LpProblem("Line Equation from 2 points", LpMinimize)

    coef = LpVariable("coef",
                      None,
                      None,
                      )

    z = LpVariable("z",
                   None,
                   None,
                   )

    prob += 0, "arbitrary objective"

    prob += (coef * p1[0]) + z == p1[1]
    prob += (coef * p2[0]) + z == p2[1]

    prob.solve()

    if printResult:
        print("Status:", LpStatus[prob.status])

        if LpStatus[prob.status] == "Optimal":
            for v in prob.variables():
                print(v.name, "=", v.varValue)
            print("OBJECTIVE = ", value(prob.objective))

    return coef.value(), z.value()
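A similar hedged sketch for the two-point variant (both snippets name their function main, so they would shadow each other if pasted into one module; the points below are illustrative):

# Line through (0, 1) and (2, 5): slope 2, intercept 1.
coef, z = main((0.0, 1.0), (2.0, 5.0), printResult=False)
print("y = {}*x + {}".format(coef, z))  # expected: y = 2.0*x + 1.0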
Example #3

    def minimize(self):
        model = LpProblem(name='nodal_minimize', sense=LpMinimize)
        powerVar = LpVariable('Nodal_power', lowBound=0, upBound=1000)
        line_vars = {}
        for line in self.lines:
            line_vars[LpVariable('line_' + str(line),
                                 lowBound=line.limits[0],
                                 upBound=line.limits[1])] = (line.ld, line)
        # We have all the variables, compile them
        model += self.cost_per_mw * powerVar - lpSum(
            [key * line_vars[key][0] for key in line_vars.keys()])
        model += -1 * powerVar + lpSum(
            [key for key in line_vars.keys()]) == -1 * self.Pload
        model.solve()
        print(powerVar.value())
        self.Pgen = powerVar.value()
        for key in line_vars.keys():
            print(key.value())
            line_vars[key][1].Pnode[self] = key.value()
        print()
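A hedged usage sketch for the method above (the node object and its lines, cost_per_mw and Pload attributes are project-specific and assumed to be configured elsewhere):

# Illustrative call: solve the nodal dispatch LP and read back the generation level.
node.minimize()   # prints the chosen generation and the per-line flows
print(node.Pgen)  # generation selected by the LP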
Example #4
    def optimal_routing_mlu_critical_pairs(self, tm_idx, critical_pairs):
        tm = self.traffic_matrices[tm_idx]

        pairs = critical_pairs

        demands = {}
        background_link_loads = np.zeros((self.num_links))
        for i in range(self.num_pairs):
            s, d = self.pair_idx_to_sd[i]
            #background link load
            if i not in critical_pairs:
                self.ecmp_next_hop_distribution(background_link_loads,
                                                tm[s][d], s, d)
            else:
                demands[i] = tm[s][d]

        model = LpProblem(name="routing")

        pair_links = [(pr, e[0], e[1]) for pr in pairs for e in self.lp_links]
        ratio = LpVariable.dicts(name="ratio",
                                 indexs=pair_links,
                                 lowBound=0,
                                 upBound=1)

        link_load = LpVariable.dicts(name="link_load", indexs=self.links)

        r = LpVariable(name="congestion_ratio")

        for pr in pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][0]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][0]
            ]) == -1, "flow_conservation_constr1_%d" % pr)

        for pr in pairs:
            model += (lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[1] == self.pair_idx_to_sd[pr][1]
            ]) - lpSum([
                ratio[pr, e[0], e[1]]
                for e in self.lp_links if e[0] == self.pair_idx_to_sd[pr][1]
            ]) == 1, "flow_conservation_constr2_%d" % pr)

        for pr in pairs:
            for n in self.lp_nodes:
                if n not in self.pair_idx_to_sd[pr]:
                    model += (lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[1] == n
                    ]) - lpSum([
                        ratio[pr, e[0], e[1]]
                        for e in self.lp_links if e[0] == n
                    ]) == 0, "flow_conservation_constr3_%d_%d" % (pr, n))

        for e in self.lp_links:
            ei = self.link_sd_to_idx[e]
            model += (
                link_load[ei] == background_link_loads[ei] +
                lpSum([demands[pr] * ratio[pr, e[0], e[1]]
                       for pr in pairs]), "link_load_constr%d" % ei)
            model += (link_load[ei] <= self.link_capacities[ei] * r,
                      "congestion_ratio_constr%d" % ei)

        model += r + OBJ_EPSILON * lpSum([link_load[ei] for ei in self.links])

        model.solve(solver=GLPK(msg=False))
        assert LpStatus[model.status] == 'Optimal'

        obj_r = r.value()
        solution = {}
        for k in ratio:
            solution[k] = ratio[k].value()

        return obj_r, solution
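A hedged usage sketch (the environment object, its traffic matrices and the choice of critical pairs come from the surrounding project; env and the pair indices here are illustrative):

# Re-optimize only the critical pairs of traffic matrix 0; all other pairs stay on ECMP.
mlu, ratios = env.optimal_routing_mlu_critical_pairs(tm_idx=0, critical_pairs=[0, 3, 7])
# mlu is the congestion ratio r; ratios maps (pair, link_src, link_dst) to the
# fraction of that pair's demand placed on that link.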
Example #5
	def learn2(self, performance_table):
		"""This version of learn checks whether or not an alternative is to be kept
		in the learning process. This method is also faster than the previous version
		""" 

		prob = LpProblem("Learn", LpMaximize)

		alts = filter(lambda a: a.__class__.__name__ != "LimitProfile", performance_table.alts)
		crits = self.crits

		alts_name = [alt.name for alt in alts]
		crits_name = [crit.name for crit in crits]
		
		categories = self.categories
		
		categories.sort(key=lambda c: c.rank, reverse=True)	
		
		categoriesUp = list(categories)
		firstCat = categoriesUp.pop(0)
		
		categoriesDown = list(categories)
		lastCat = categoriesDown.pop()
		
		categories0 = list(categories)
		categories0.insert(0, Category(rank = (categories[0].rank + 1), name = "fake")) #add a fake category on the first position
				
		alternativesByCat = {}
		for cat in categories:
			alternativesByCat[cat] = [alt for alt in alts if alt.category == cat]
			
		#small float number
		epsilon = 0.001

		#variables (v: variable, d: dict of variables)
		v_lambda = LpVariable("lambda", lowBound=0.5, upBound=1)
		d_gamma = LpVariable.dicts("gamma", alts_name, cat=LpBinary)
		d_p = LpVariable.dicts("p", crits_name, lowBound=0, upBound=1)
		d_gb = LpVariable.dicts("gb", \
								[crit.name + cat.name for crit in crits for cat in categories0], \
								lowBound=0, \
								upBound=1)
		d_deltaInf = LpVariable.dicts("deltaInf", \
									  [alt.name + crit.name for alt in alts for crit in crits], \
									  cat=LpBinary)
		d_deltaSup = LpVariable.dicts("deltaSup", \
									  [alt.name + crit.name for alt in alts for crit in crits], \
									  cat=LpBinary)
		d_cInf = LpVariable.dicts("cInf", \
								  [alt.name + crit.name for alt in alts for crit in crits], \
								  lowBound=0, upBound=1)
		d_cSup = LpVariable.dicts("cSup", \
								  [alt.name + crit.name for alt in alts for crit in crits], \
								  lowBound=0, upBound=1)

		#maximize
		prob += sum(d_gamma[alt.name] for alt in alts)

		#constraints		
		for crit in crits:
			prob += d_gb[crit.name + "fake"] == 0
			prob += d_gb[crit.name + lastCat.name] == 1
					
		for cat in categoriesDown:
			for alt in alternativesByCat[cat]:
				# sum the partial concordances of this alternative over all criteria
				prob += sum(d_cSup[alt.name + crit.name] for crit in crits) + epsilon <= v_lambda + 2 * (1 - d_gamma[alt.name])
		for cat in categoriesUp:
			for alt in alternativesByCat[cat]:
				prob += sum(d_cInf[alt.name + crit.name] for crit in crits) >= v_lambda - 2 * (1 - d_gamma[alt.name])

		for alt in alts:	   
			for crit in crits:
				prob += d_cInf[alt.name + crit.name] <= d_p[crit.name]
				prob += d_cSup[alt.name + crit.name] <= d_p[crit.name]
				prob += d_cInf[alt.name + crit.name] <= d_deltaInf[alt.name + crit.name]
				prob += d_cSup[alt.name + crit.name] <= d_deltaSup[alt.name + crit.name]
				prob += d_cInf[alt.name + crit.name] >= d_deltaInf[alt.name + crit.name] + d_p[crit.name] - 1
				prob += d_cSup[alt.name + crit.name] >= d_deltaSup[alt.name + crit.name] + d_p[crit.name] - 1
				
		prev_cat_name = "fake"
		for cat in categories:
			for alt in alternativesByCat[cat]:
				for crit in crits:
					prob += d_deltaInf[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name + prev_cat_name] + epsilon
					prob += d_deltaSup[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name + cat.name] + epsilon
					prob += d_deltaInf[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name + prev_cat_name] + 1
					prob += d_deltaSup[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name + cat.name] + 1
			prev_cat_name = cat.name

		prev_cat_name = firstCat.name
		for cat in categoriesUp:
			for crit in crits:
				prob += d_gb[crit.name + cat.name] >= d_gb[crit.name + prev_cat_name]
			prev_cat_name = cat.name
			
		prob += sum(d_p[crit.name] for crit in crits) == 1
		
		print prob
		
		#solver
		GLPK().solve(prob)
		
		#update parameters
		self.cutting_threshold = v_lambda.value()

		for crit in crits:
			crit.weight = d_p[crit.name].value()
			
		for cat in categoriesDown:
			for crit in crits:
				performance_table[cat.lp_sup][crit] = d_gb[crit.name + cat.name].value()		
				
		self.ignoredAlternatives = []
		for alt in alts:
			if d_gamma[alt.name].value() == 0:
				self.ignoredAlternatives.append(alt)
Example #6
	def learn(self, performance_table):
		"""Learn parameters"""

		prob = LpProblem("Learn", LpMaximize)

		alts = filter(lambda a: a.__class__.__name__ != "LimitProfile", performance_table.alts)
		crits = self.crits

		alts_name = [alt.name for alt in alts]
		crits_name = [crit.name for crit in crits]
		
		categories = self.categories
		
		categories.sort(key=lambda c: c.rank, reverse=True)	
		
		categoriesUp = list(categories)
		firstCat = categoriesUp.pop(0)
		
		categoriesDown = list(categories)
		lastCat = categoriesDown.pop()
		
		categories0 = list(categories)
		categories0.insert(0, Category(rank = (categories[0].rank + 1), name = "fake")) #add a fake category on the first position
				
		alternativesByCat = {}
		for cat in categories:
			alternativesByCat[cat] = [alt for alt in alts if alt.category == cat]
			
		#small float number
		epsilon = 0.000001

		#variables (v: variable, d: dict of variables)
		v_lambda = LpVariable("lambda", lowBound=0.5, upBound=1)
		v_alpha = LpVariable("alpha", lowBound=0)
		d_x = LpVariable.dicts("x", alts_name, lowBound=0)
		d_y = LpVariable.dicts("y", alts_name, lowBound=0)
		d_p = LpVariable.dicts("p", crits_name, lowBound=0, upBound=1)
		d_gb = LpVariable.dicts("gb", \
								[crit.name + cat.name for crit in crits for cat in categories0], \
								lowBound=0, \
								upBound=1)
		d_deltaInf = LpVariable.dicts("deltaInf", \
									  [alt.name + crit.name for alt in alts for crit in crits], \
									  cat=LpBinary)
		d_deltaSup = LpVariable.dicts("deltaSup", \
									  [alt.name + crit.name for alt in alts for crit in crits], \
									  cat=LpBinary)
		d_cInf = LpVariable.dicts("cInf", \
								  [alt.name + crit.name for alt in alts for crit in crits], \
								  lowBound=0, upBound=1)
		d_cSup = LpVariable.dicts("cSup", \
								  [alt.name + crit.name for alt in alts for crit in crits], \
								  lowBound=0, upBound=1)

		#maximize
		prob += v_alpha

		#constraints
		for crit in crits:
			prob += d_gb[crit.name + "fake"] == 0
			prob += d_gb[crit.name + lastCat.name] == 1
		

		for cat in categoriesDown:
			for alt in alternativesByCat[cat]:
				prob += sum(d_cSup[alt.name + crit.name] for crit in crits) + d_x[alt.name] == v_lambda
		for cat in categoriesUp:
			for alt in alternativesByCat[cat]:
				prob += sum(d_cInf[alt.name + crit.name] for crit in crits) == v_lambda + d_y[alt.name]
				
		for alt in alts:
			prob += v_alpha <= d_x[alt.name]
			prob += v_alpha <= d_y[alt.name]
			prob += d_x[alt.name] >= epsilon
			
			for crit in crits:
				prob += d_cInf[alt.name + crit.name] <= d_p[crit.name]
				prob += d_cSup[alt.name + crit.name] <= d_p[crit.name]
				prob += d_cInf[alt.name + crit.name] <= d_deltaInf[alt.name + crit.name]
				prob += d_cSup[alt.name + crit.name] <= d_deltaSup[alt.name + crit.name]
				prob += d_cInf[alt.name + crit.name] >= d_deltaInf[alt.name + crit.name] + d_p[crit.name] - 1
				prob += d_cSup[alt.name + crit.name] >= d_deltaSup[alt.name + crit.name] + d_p[crit.name] - 1
				
		prev_cat_name = "fake"
		for cat in categories:
			for alt in alternativesByCat[cat]:
				for crit in crits:
					prob += d_deltaInf[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name + prev_cat_name] + epsilon
					prob += d_deltaSup[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name + cat.name] + epsilon
					prob += d_deltaInf[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name + prev_cat_name] + 1
					prob += d_deltaSup[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name + cat.name] + 1
			prev_cat_name = cat.name

		prev_cat_name = firstCat.name
		for cat in categoriesUp:
			for crit in crits:
				prob += d_gb[crit.name + cat.name] >= d_gb[crit.name + prev_cat_name]
			prev_cat_name = cat.name
			
		prob += sum(d_p[crit.name] for crit in crits) == 1
		
		print prob
		
		#solver
		GLPK().solve(prob)

		#prob.writeLP("SpamClassification.lp")
		#status = prob.solve()


		#update parameters
		self.cutting_threshold = v_lambda.value()

		for crit in crits:
			crit.weight = d_p[crit.name].value()
			
		for cat in categoriesDown:
			for crit in crits:
				performance_table[cat.lp_sup][crit] = d_gb[crit.name + cat.name].value()
Example #7
	def learn_two_cat2(self, performance_table):
		"""This version of learnTwoCat checks whether or not an alternative is to be kept
		in the learning process. This method is also faster than the previous version
		""" 
		
		if len(self.categories) != 2:
			raise SemanticError, "The learnTwoCat() method requires exactly two categories."

		prob = LpProblem("twoCat2", LpMaximize)
		
		alts = filter(lambda a: a.__class__.__name__ != "LimitProfile", performance_table.alts)
		crits = self.crits

		alts_name = [alt.name for alt in alts]
		crits_name = [crit.name for crit in crits]

		self.categories.sort(key=lambda c: c.rank)

		alts1 = [alt for alt in alts if alt.category == self.categories[0]]
		alts2 = [alt for alt in alts if alt.category == self.categories[1]]

		#small float number
		epsilon = 0.0001

		#variables (v: variable, d: dict of variables)
		v_lambda = LpVariable("lambda", lowBound=0.5, upBound=1)
		d_gamma = LpVariable.dicts("gamma", alts_name, cat=LpBinary)
		d_p = LpVariable.dicts("p", crits_name, lowBound=0, upBound=1)
		d_gb = LpVariable.dicts("gb", crits_name, lowBound=0, upBound=1)
		d_delta = LpVariable.dicts("delta", \
			[alt.name + crit.name for alt in alts for crit in crits]\
			, cat=LpBinary)
		d_c = LpVariable.dicts("c", \
			[alt.name + crit.name for alt in alts for crit in crits]\
			, lowBound=0, upBound=1)

		#maximize
		prob += sum(d_gamma[alt.name] for alt in alts)

		#constraints
		for alt in alts2:
			prob += sum(d_c[alt.name + crit.name] for crit in crits) + epsilon <= v_lambda + 2 * (1 - d_gamma[alt.name])
		for alt in alts1:
			prob += sum(d_c[alt.name + crit.name] for crit in crits) >= v_lambda - 2 * (1 - d_gamma[alt.name])
			
		for alt in alts:
			for crit in crits:
				prob += d_c[alt.name + crit.name] <= d_p[crit.name]
				prob += d_c[alt.name + crit.name] <= d_delta[alt.name + crit.name]
				prob += d_c[alt.name + crit.name] >= d_delta[alt.name + crit.name] + d_p[crit.name] - 1
				prob += d_delta[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name] + epsilon
				prob += d_delta[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name] + 1
				
		prob += sum(d_p[crit.name] for crit in crits)  == 1

		#solver
		GLPK().solve(prob)

		#update parameters
		self.cutting_threshold = v_lambda.value()

		for crit in crits:
			crit.weight = d_p[crit.name].value()

		for crit in crits:
			self.lp_performance_table[self.limit_profiles[0]][crit] = d_gb[crit.name].value()
		
		self.ignoredAlternatives = []
		for alt in alts:
			if d_gamma[alt.name].value() == 0:
				self.ignoredAlternatives.append(alt)
Example #8
	def learn_two_cat(self, performance_table):
		"""Learn parameters for two categories and two training sets of alternatives."""		
		if len(self.categories) != 2:
			raise SemanticError, "The learnTwoCat() method requires exactly two categories."

		prob = LpProblem("twoCat", LpMaximize)

		alts = filter(lambda a: a.__class__.__name__ != "LimitProfile", performance_table.alts)
		crits = self.crits

		alts_name = [alt.name for alt in alts if alt.__class__.__name__ != "LimitProfile"]
		crits_name = [crit.name for crit in crits]

		self.categories.sort(key=lambda c: c.rank)

		alts1 = [alt for alt in alts if alt.category == self.categories[0]]
		alts2 = [alt for alt in alts if alt.category == self.categories[1]]

		#small float number
		epsilon = 0.00001

		#variables (v: variable, d: dict of variables)
		v_lambda = LpVariable("lambda", lowBound=0.5, upBound=1)
		v_alpha = LpVariable("alpha", lowBound=0)
		d_x = LpVariable.dicts("x", alts_name, lowBound=0)
		d_y = LpVariable.dicts("y", alts_name, lowBound=0)
		d_p = LpVariable.dicts("p", crits_name, lowBound=0, upBound=1)
		d_gb = LpVariable.dicts("gb", crits_name, lowBound=0, upBound=1)
		d_delta = LpVariable.dicts("delta", \
			[alt.name + crit.name for alt in alts for crit in crits]\
			, cat=LpBinary)
		d_c = LpVariable.dicts("c", \
			[alt.name + crit.name for alt in alts for crit in crits]\
			, lowBound=0, upBound=1)

		#maximize
		prob += v_alpha

		#constraints
		for alt in alts2:
			prob += sum(d_c[alt.name + crit.name] for crit in crits) + d_x[alt.name] == v_lambda
		for alt in alts1:
			prob += sum(d_c[alt.name + crit.name] for crit in crits) == v_lambda + d_y[alt.name]

		for alt in alts:
			prob += v_alpha <= d_x[alt.name]
			prob += v_alpha <= d_y[alt.name]
			prob += d_x[alt.name] >= epsilon
			
			for crit in crits:
				prob += d_c[alt.name + crit.name] <= d_p[crit.name]
				prob += d_c[alt.name + crit.name] >= d_delta[alt.name + crit.name] + d_p[crit.name] - 1
				prob += d_c[alt.name + crit.name] <= d_delta[alt.name + crit.name]
				prob += d_delta[alt.name + crit.name] >= performance_table[alt][crit] - d_gb[crit.name] + epsilon
				prob += d_delta[alt.name + crit.name] <= performance_table[alt][crit] - d_gb[crit.name] + 1

		prob += sum(d_p[crit.name] for crit in crits) == 1

		#solver
		GLPK().solve(prob)

		#prob.writeLP("SpamClassification.lp")
		#status = prob.solve()


		#update parameters
		self.cutting_threshold = v_lambda.value()

		for crit in crits:
			crit.weight = d_p[crit.name].value()

		for crit in crits:
			performance_table[self.limit_profiles[0]][crit] = d_gb[crit.name].value()
Example #9
    def branch_and_bound(remote_num):
        ## Declare the problem
        prob = pulp.LpProblem(sense=pulp.LpMinimize)

        # Number of units
        N = unit_num
        # Number of boxes
        M = remote_num
        # Number of CANs that can be connected
        can_master_max = 4
        can_remote_max = 4 if remote_num <= 3 else 2
        # Number of ENs that can be connected
        en_max = 4

        ## Declare the variables (Var is presumably an alias for pulp.LpVariable)
        x = []
        for i in range(N):
            tmp = []
            for j in range(M):
                var = Var(name='x{}_{}'.format(i, j), cat='Binary')
                tmp.append(var)
            x.append(tmp)

        y = Var(name='y', cat='Integer')

        ## Objective function
        prob += y

        ## Constraints
        prob += y >= 0

        # Every unit must be assigned
        for i in range(N):
            prob += lpSum([x[i][j] for j in range(M)]) == 1
        # Up to 8 units per box
        for j in range(M):
            prob += lpSum([x[i][j] for i in range(N)]) <= num_cap[j]
        # Within each box's capacity
        for j in range(M):
            prob += lpSum([x[i][j] * costs[i]
                           for i in range(N)]) <= (watt_cap[j] + y)

        # CAN constraint
        prob += lpSum([x[idx][0] for idx in can_indices]) <= can_master_max
        for j in range(1, M):
            prob += lpSum([x[idx][j] for idx in can_indices]) <= can_remote_max

        # EN constraint
        for j in range(M):
            prob += lpSum([x[idx][j] for idx in en_indices]) <= en_max

        ## Run the solver
        solver = pulp.PULP_CBC_CMD(maxSeconds=1)
        prob.solve(solver=solver)
        status = prob.status
        ret = status == 1 and y.value() == 0

        if ret:
            print("最小は {} 個".format(remote_num))
            for j in range(M):
                units = [costs[i] for i in range(N) if x[i][j].value() == 1]
                units_name = [
                    units_info[transpose_indices[i]][0] for i in range(N)
                    if x[i][j].value() == 1
                ]
                n = sum([x[i][j].value() for i in range(N)])
                sm = sum(units)
                units = ["{:3d}".format(c) for c in units]
                units_name = [c.rjust(4) for c in units_name]
                for i in range(8 - len(units)):
                    units.append("   ")
                    units_name.append("   ")
                ret1 = ", ".join(units)
                ret2 = ", ".join(units_name)
                print("リモート{} には {}個 計 {} [{}] [{}]".format(
                    j, n, sm, ret1, ret2))
        else:
            print("{}は無理".format(remote_num), status, y.value())

        return ret
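A hedged usage sketch: branch_and_bound(remote_num) returns True when remote_num boxes suffice (y solves to 0), so the smallest feasible count can be found by trying counts in increasing order. The data the closure reads (unit_num, costs, num_cap, watt_cap, can_indices, en_indices, ...) is assumed to be prepared by the enclosing scope.

# Illustrative search for the minimum number of remote boxes.
for remote_num in range(1, 9):
    if branch_and_bound(remote_num):
        break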
Example #10
    def get_different_adjacent_colors(width, height, image, colors,
                                      color_algorithm):
        from pulp import LpProblem, LpVariable, LpMinimize, lpSum, PULP_CBC_CMD

        edges = set()

        mapping = {}
        n = 0

        for x in range(width):
            for y in range(height):
                for xd, yd in ((0, 1), (1, 0), (-1, 0), (0, -1)):
                    xn, yn = x + xd, y + yd

                    if not 0 <= xn < width or not 0 <= yn < height:
                        continue

                    i1, i2 = image[x][y], image[xn][yn]

                    if i1 < i2:
                        if i1 not in mapping:
                            n += 1
                            mapping[n] = i1
                            mapping[i1] = n

                        if i2 not in mapping:
                            n += 1
                            mapping[n] = i2
                            mapping[i2] = n

                        edges.add((mapping[i1], mapping[i2]))

        edges = list(edges)
        model = LpProblem(sense=LpMinimize)

        chromatic_number = LpVariable(name="chromatic number", cat='Integer')
        variables = [[LpVariable(name=f"x_{i}_{j}", cat='Binary') \
                      for i in range(n)] for j in range(n)]

        for i in range(n):
            model += lpSum(variables[i]) == 1
        for u, v in edges:
            for color in range(n):
                model += variables[u - 1][color] + variables[v - 1][color] <= 1
        for i in range(n):
            for j in range(n):
                model += chromatic_number >= (j + 1) * variables[i][j]

        if color_algorithm == ColorAlgorithm.least_possible:
            model += chromatic_number
        else:
            model += chromatic_number == len(colors)

        status = model.solve(PULP_CBC_CMD(msg=False))

        if chromatic_number.value() > len(colors):
            Utilities.error(
                "Not enough colors to color without adjacent areas having the same one!"
            )

        return {
            mapping[variable + 1]: colors[color]
            for variable in range(n) for color in range(n)
            if variables[variable][color].value() == 1
        }
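A hedged usage sketch for the coloring routine above, assuming it is reachable as a plain function (it may be a static method of the surrounding class); ColorAlgorithm and Utilities belong to that project, and the 2x2 image and color list are illustrative:

# Two adjacent regions; two colors are enough.
image = [[1, 2],
         [1, 2]]   # image[x][y] -> region id
colors = ["red", "blue"]
assignment = get_different_adjacent_colors(2, 2, image, colors,
                                           ColorAlgorithm.least_possible)
print(assignment)  # e.g. {1: 'red', 2: 'blue'}, adjacent regions get different colors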