Example No. 1
def load_objectives_from_gdm_file(p: DTLZ1P, obj_path: str):
    """Load objective vectors from a GDM file, score them against the known
    best compromise, and report the region of interest (ROI)."""
    with open(obj_path) as f:
        # strip trailing whitespace such as '\n' from the end of each line
        content = [line.strip() for line in f.readlines()]

    # Rebuild one solution per line, keeping the first number_of_objectives values.
    _bag = []
    for element in content:
        line = clean_line(element)
        solution_ = p.generate_solution()
        solution_.objectives = [float(_var) for _var in line[:p.number_of_objectives]]
        _bag.append(solution_)
        print(solution_.objectives)
    print(len(_bag))
    preference = ITHDMPreferences(p.objectives_type, p.instance_.attributes['models'][0])
    best_compromise = p.generate_existing_solution(p.instance_.attributes['best_compromise'][0], True)

    max_net_score = 0
    # Start the reference compromise with a neutral score so it can still be
    # reported even if no loaded solution outranks it.
    best_compromise.attributes['net_score'] = max_net_score

    # Build the ROI: score every solution against the current best compromise,
    # promoting a solution whenever it achieves a higher net score.
    print('best compromise', best_compromise.objectives)
    for x in _bag:
        preference.compare(x, best_compromise)
        x.attributes['net_score'] = preference.sigmaXY
        if max_net_score < preference.sigmaXY:
            max_net_score = preference.sigmaXY
            best_compromise = x

    # Keep only the solutions whose net score reaches the beta threshold.
    roi = list(filter(lambda x: x.attributes['net_score'] >= preference.preference_model.beta, _bag))
    print('Best compromise')
    print(best_compromise.objectives, best_compromise.attributes['net_score'])
    print('ROI', len(roi))
    for x in roi:
        print(x.objectives, x.attributes['net_score'])
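
A minimal usage sketch for the loader above, assuming a DTLZ1P instance (called problem here) has already been configured with the 'models' and 'best_compromise' entries the function reads; the file path is illustrative, not part of the example.

# Hypothetical usage: `problem` is a configured DTLZ1P, the path is illustrative.
load_objectives_from_gdm_file(problem, 'fronts/dtlz1p_objectives.txt')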
Example No. 2
class BestCompromise:
    """
    Looking for a best compromise in a solution sample using the dm preferences
    """
    def __init__(self,
                 problem: GDProblem,
                 sample_size=1000,
                 dm: int = 0,
                 k: int = 100000):
        self.problem = problem
        self.sample_size = sample_size
        self.dm = dm
        self.k = k
        self.preference = ITHDMPreferences(
            problem.objectives_type,
            problem.instance_.attributes['models'][dm])
        self.ranking = ITHDMRanking(self.preference, [-1], [-1])

    def make(self) -> Tuple[Solution, List[Solution]]:
        """Returns the best compromise and the candidate solutions.

        Generates samples of feasible solutions and compares them, looking for an
        xPy or xSy relationship; the candidate solutions are finally ordered by
        crowding distance.
        """
        # Keep sampling until at least k candidates that satisfy the preference
        # relations have been collected.
        bag = []
        while len(bag) < self.k:
            print('Check xPy inner :', len(bag))
            sample = [
                self.problem.generate_solution()
                for _ in range(self.sample_size)
            ]
            candidates = self.ranking.compute_ranking(sample)
            if len(candidates) != 0:
                bag += candidates[0]
        print('Candidates size: ', len(bag))
        # Pick the candidate with the highest net score as the best compromise
        # (fall back to the first candidate if every score is non-positive).
        max_net_score = 0
        best_compromise = bag[0]
        for s in bag:
            if max_net_score < s.attributes['net_score']:
                max_net_score = s.attributes['net_score']
                best_compromise = s
        bag.remove(best_compromise)
        # Re-score the remaining candidates against the best compromise and
        # keep those whose net score reaches the beta threshold (the ROI).
        for x in bag:
            self.preference.compare(x, best_compromise)
            x.attributes['net_score'] = self.preference.sigmaXY

        roi = list(
            filter(lambda p: p.attributes['net_score'] >= self.preference.preference_model.beta,
                   bag))
        return best_compromise, [best_compromise] + roi
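
A minimal usage sketch, assuming problem is a GDProblem whose instance already carries the DM preference models; sample_size and k are illustrative values, not recommendations.

# Hypothetical usage of the search; `problem` is assumed to be a configured GDProblem.
searcher = BestCompromise(problem, sample_size=500, dm=0, k=1000)
best, candidates = searcher.make()
print('best compromise:', best.objectives, best.attributes['net_score'])
print('ROI size:', len(candidates) - 1)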
Example No. 3
    def _desc_rule(self, x: Solution, dm: int) -> int:
        """Descending rule: assigns x to a category by comparing it against the
        reference actions in r1 and then in r2 of the given DM."""
        preferences = ITHDMPreferences(self.problem.objectives_type,
                                       self.problem.get_preference_model(dm))
        category = -1
        r1 = self.problem.instance_.attributes['r1'][dm]
        for idx in range(len(r1)):
            self.w.objectives = r1[idx]
            if preferences.compare(x, self.w) <= -1:
                category = idx
                break

        # A category found among the r1 reference actions is offset by len(r1).
        if category != -1:
            category += len(r1)
        r2 = self.problem.instance_.attributes['r2'][dm]
        for idx in range(len(r2)):
            self.w.objectives = r2[idx]
            if preferences.compare(x, self.w) <= -1:
                category = idx
                break

        return category
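
A sketch of how the descending rule might be driven from the enclosing classifier; classifier, front, and the 'class' attribute name are assumptions introduced for illustration.

# Hypothetical driver: classify every solution of a front with the descending rule.
for solution in front:
    solution.attributes['class'] = classifier._desc_rule(solution, dm=0)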
Example No. 4
    def _is_high_dis(self, x: Solution, dm: int) -> bool:
        """
        The DM is strongly dissatisfied with x if for each w in R1 we have wP(Beta, Lambda)x.
        """
        preferences = ITHDMPreferences(self.problem.objectives_type,
                                       self.problem.get_preference_model(dm))
        r1 = self.problem.instance_.attributes['r1'][dm]
        for idx in range(len(r1)):
            self.w.objectives = r1[idx]
            # compare(w, x) <= -1 is read as w being preferred to x; any w that
            # fails the relation rules out strong dissatisfaction.
            if preferences.compare(self.w, x) > -1:
                return False
        return True
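
An equivalent formulation of the check above written with all(), shown only to make the quantifier in the docstring explicit; it assumes the same compare convention, where a value of -1 or lower means the first argument is preferred to the second.

def is_high_dis_sketch(preferences, w, x, r1) -> bool:
    # x is highly dissatisfactory only if every reference action in R1 is preferred to x.
    def outranked_by(reference) -> bool:
        w.objectives = reference
        return preferences.compare(w, x) <= -1
    return all(outranked_by(reference) for reference in r1)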
Example No. 5
    def _is_high_sat(self, x: Solution, dm: int) -> bool:
        """
        The DM is highly satisfied with a satisfactory x if for each w in R2 we have xP(Beta, Lambda)w.
        """
        preferences = ITHDMPreferences(self.problem.objectives_type,
                                       self.problem.get_preference_model(dm))
        r2 = self.problem.instance_.attributes['r2'][dm]
        for idx in range(len(r2)):
            self.w.objectives = r2[idx]
            # compare(x, w) <= -1 is read as x being preferred to w; any w that
            # fails the relation rules out high satisfaction.
            if preferences.compare(x, self.w) > -1:
                return False
        return True
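
A sketch of how the two predicates might be combined to label a solution for one DM; the label strings and the classifier object are assumptions for illustration.

# Hypothetical labelling helper built on _is_high_sat and _is_high_dis.
def dm_satisfaction_label(classifier, x, dm: int) -> str:
    if classifier._is_high_sat(x, dm):
        return 'highly satisfactory'
    if classifier._is_high_dis(x, dm):
        return 'highly dissatisfactory'
    return 'intermediate'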
Example No. 6
    def _asc_rule(self, x: Solution, dm: int) -> int:
        """Ascending rule: assigns x to a category by comparing it against the
        reference actions in r2 and then in r1 of the given DM."""
        preferences = ITHDMPreferences(self.problem.objectives_type,
                                       self.problem.get_preference_model(dm))
        r2 = self.problem.instance_.attributes['r2'][dm]
        category = -1
        for idx in range(len(r2)):
            self.w.objectives = r2[idx]
            v = preferences.compare(self.w, x)
            if v <= -1 or v == 2:
                category = idx
                break

        # A category found among the r2 actions is returned as-is; otherwise the
        # r1 actions are checked and the matching index is offset by len(r2).
        if category != -1:
            return category
        r1 = self.problem.instance_.attributes['r1'][dm]
        for idx in range(len(r1)):
            self.w.objectives = r1[idx]
            if preferences.compare(self.w, x) <= -1:
                category = idx
                break
        if category == -1:
            return category
        return category + len(r2)
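
A sketch comparing the ascending and descending assignments for a set of solutions; classifier and front are assumptions, and no reconciliation policy between the two rules is implied by the snippets above.

# Hypothetical driver printing both rule assignments for each solution.
for solution in front:
    asc = classifier._asc_rule(solution, dm=0)
    desc = classifier._desc_rule(solution, dm=0)
    print(solution.objectives, 'ascending:', asc, 'descending:', desc)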